target/s390x/excp_helper.c

/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code);
    cpu_loop_exit(cs);
}

void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

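/*
 * A note on the deposit32() above, with a worked example (illustrative,
 * not from the original source): deposit32(value, start, length, field)
 * inserts "field" at LSB-numbered bit position "start", so
 * deposit32(0, 8, 8, 0xff) yields 0x0000ff00.  Bits 8..15 in LSB
 * numbering are byte 2 of the FPC in the big-endian byte numbering of
 * the PoP, i.e. the architectural DXC field.
 */
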
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!excp &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ms->ram_size);
        excp = PGM_ADDRESSING;
        tec = 0; /* unused */
    }

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    env->int_pgm_ilen = 2;
    trigger_pgm_exception(env, excp);
    cpu_loop_exit_restore(cs, retaddr);
}

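/*
 * Summary of the return contract (per the generic QEMU tlb_fill
 * interface, not restated in this file): the function returns true with
 * the TLB entry installed on success, returns false only when "probe"
 * is set and the translation failed, and otherwise never returns -- it
 * longjmps back to the cpu_exec loop via cpu_loop_exit_restore() with a
 * program interrupt pending.
 */
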
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

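/*
 * Delivery of every interruption class below follows the same PSW-swap
 * pattern seen above: the current PSW is stored into the class's
 * "old PSW" slot in the lowcore, the class's "new PSW" slot (planted
 * there by the guest OS) is fetched, and load_psw() resumes execution
 * from it.  All lowcore fields are big-endian, hence the cpu_to_be*() /
 * be*_to_cpu() conversions.
 */
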
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

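/*
 * The if/else-if chain above doubles as the priority order in which
 * pending external interruptions are presented: emergency signal first,
 * then external call, clock comparator, CPU timer, and finally service
 * signal.  Each source is also gated by its subclass-mask bit in CR0,
 * so a source that is pending but masked lets a lower-priority one
 * through.
 */
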
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

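/*
 * Size check: 32 vector registers of 16 bytes each occupy 0x200 (512)
 * bytes, and the pad extends the save area to the architected 1024-byte
 * length that the build-time assertion above enforces.
 */
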
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, true);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

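/*
 * cpu_physical_memory_map() may hand back a shorter mapping than
 * requested (e.g. when the area crosses a memory-region boundary); in
 * that case the code above unmaps with an access length of 0 so nothing
 * is marked dirty, and reports the store as failed so that the caller
 * can clear the validity bit instead of exposing a partially written
 * save area to the guest.
 */
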
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

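/*
 * s390_cpu_exec_interrupt() below is wired up as the CPU's
 * cpu_exec_interrupt hook (in cpu.c, per the usual QEMU pattern; an
 * assumption, not shown in this file).  The main cpu_exec loop calls it
 * whenever CPU_INTERRUPT_HARD is set, and a "true" return reports that
 * an interrupt was delivered, so the loop re-evaluates CPU state before
 * chaining further translation blocks.
 */
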
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered.  Go back to sleep.  */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds true except when the MVCP
           and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw() which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

static void QEMU_NORETURN monitor_event(CPUS390XState *env,
                                        uint64_t monitor_code,
                                        uint8_t monitor_class, uintptr_t ra)
{
    /* Store the Monitor Code and the Monitor Class Number into the lowcore */
    stq_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, monitor_code), monitor_code);
    stw_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, mon_class_num), monitor_class);

    tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
}

void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
                          uint32_t monitor_class)
{
    g_assert(monitor_class <= 0xff);

    if (env->cregs[8] & (0x8000 >> monitor_class)) {
        monitor_event(env, monitor_code, monitor_class, GETPC());
    }
}

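/*
 * The CR8 test above implements the monitor masks: in z/Architecture
 * bit numbering, bits 48-63 of CR8 enable monitor classes 0-15, and
 * bit 48 is the 0x8000 bit of the low halfword tested here.  Worked
 * example (illustrative only): for monitor_class == 3, 0x8000 >> 3 ==
 * 0x1000, so a class-3 MONITOR CALL raises PGM_MONITOR only while CR8
 * bit 51 is set.
 */
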
#endif /* !CONFIG_USER_ONLY */