/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif
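
/*
 * Entry point for raising a program interruption from TCG-generated
 * code: restore the guest state for the faulting instruction, record
 * the interruption code and length, and leave the CPU loop.  Never
 * returns to the caller.
 */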
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, int ilen,
                                              uintptr_t ra)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(cs);
}
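
/*
 * A data exception carries a one-byte data-exception code (DXC).  It is
 * stored into the lowcore and, when AFP-register control is enabled in
 * CR0, also into byte 2 of the FPC register, before the PGM_DATA
 * program interruption is raised.
 */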
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(CPU(s390_env_get_cpu(env))->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}
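
/*
 * For user-only emulation there is no lowcore to swap PSWs through;
 * faults are simply reflected back to cpu_loop, so the handlers below
 * are minimal stubs compared to the full system versions further down.
 */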
#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /*
     * On real machines this value is dropped into LowMem.  Since this
     * is userland, simply put it someplace that cpu_loop can find it.
     */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */
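
/*
 * Translate a QEMU MMU index back to the address-space control (ASC)
 * it models; indices at or above MMU_REAL_IDX bypass dynamic address
 * translation and are handled separately in s390_cpu_tlb_fill().
 */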
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
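
/*
 * Fill the TLB for one page: translate the virtual address through the
 * MMU (or treat it as a real address), check that the result maps to
 * valid memory, and install the mapping.  On failure, either report
 * back to a probing caller or deliver the program interruption flagged
 * by the translation code.
 */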
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot, fail;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!fail &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        fail = 1;
    }

    if (!fail) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    cpu_restore_state(cs, retaddr, true);

    /*
     * The ILC value for code accesses is undefined.  The important
     * thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
     * which would cause do_program_interrupt to attempt to read from
     * env->psw.addr again.  Cf. the condition in trigger_page_fault,
     * which is not universally applied.
     *
     * ??? If we remove ILEN_AUTO, by moving the computation of ILEN
     * into cpu_restore_state, then we may remove this entirely.
     */
    if (access_type == MMU_INST_FETCH) {
        env->int_pgm_ilen = 2;
    }

    cpu_loop_exit(cs);
}
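
/*
 * Deliver a program interruption: advance the PSW past the instruction
 * unless the interruption is nullifying, store the interruption
 * parameters and the old PSW into the lowcore, and load the
 * program-new PSW from the lowcore.
 */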
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
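
/*
 * Deliver a SUPERVISOR CALL interruption: store the SVC code, the
 * instruction length and the old PSW into the lowcore, then load the
 * svc-new PSW.
 */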
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /*
     * When a PER event is pending, the PER exception has to happen
     * immediately after the SERVICE CALL one.
     */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
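
/*
 * External interruptions are delivered one subclass at a time, in the
 * priority order of the if/else chain below, and only when both the
 * PSW external mask and the matching subclass-mask bit in CR0 are set.
 */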
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
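
/*
 * Deliver an I/O interruption: dequeue a pending I/O interrupt whose
 * interruption subclass is enabled in CR6 from the FLIC, store its
 * parameters into the lowcore, and swap PSWs.
 */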
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
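
/*
 * Store the 32 vector registers into the machine-check extended save
 * area at mcesao.  Returns 0 on success and -EFAULT if the area cannot
 * be mapped in full.
 */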
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0].ll);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1].ll);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}
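
/*
 * Deliver a machine-check interruption: build the machine-check
 * interruption code (MCIC), store the register save areas and the old
 * PSW into the lowcore, and load the mcck-new PSW.  Only floating
 * channel-report machine checks are supported so far.
 */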
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
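
/*
 * Deliver the highest-priority pending interruption.  The priority
 * order implemented below is: machine check, external, I/O, RESTART,
 * and finally STOP; program and SVC interruptions arrive here with
 * cs->exception_index already set.
 */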
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
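
/*
 * Called by the main TCG execution loop when CPU_INTERRUPT_HARD is
 * pending.  Returns true if an interruption was actually delivered.
 */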
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /*
             * Execution of the target insn is indivisible from
             * the parent EXECUTE insn.
             */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /*
             * Woken up because of a floating interrupt but it has already
             * been delivered.  Go back to sleep.
             */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
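
/*
 * PER storage-alteration events are modeled with QEMU watchpoints.
 * When one fires, record the PER event data in the env and re-execute
 * the instruction without watchpoints, so the PER program interruption
 * is raised through the normal path.
 */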
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /*
         * FIXME: When the storage-alteration-space control bit is set,
         * the exception should only be triggered if the memory access
         * is done using an address space with the storage-alteration-event
         * bit set.  We have no way to detect that with the current
         * watchpoint code.
         */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /*
         * FIXME: We currently have no way to detect the address space used
         * to trigger the watchpoint.  For now just assume it is the
         * current default ASC.  This turns out to be true except when the
         * MVCP and MVCS instructions are used.
         */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code.  A PER exception
         * will be triggered, it will call load_psw which will recompute
         * the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
 * this is only for the atomic operations, for which we want to raise a
 * specification exception.
 */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */