target/s390x/excp_helper.c

/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif
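
/*
 * Raise a program interrupt from TCG-generated code: unwind the guest
 * state from the TCG return address, log the event, flag the pending
 * program exception and leave the cpu loop.  Does not return.
 */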
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
                                              int ilen, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(cs);
}
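
/*
 * Raise a data exception (PGM_DATA).  The data-exception code (DXC) is
 * stored into the lowcore and, when the additional-floating-point control
 * (CR0 AFP) is enabled, also into the DXC field of the FPC.
 */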
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
}
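
/*
 * Raise a vector-processing exception.  The VXC shares the lowcore field
 * and the FPC byte with the DXC, so it is stored the same way.
 */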
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ILEN_AUTO, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)
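
/* For user-only emulation there is no interrupt delivery; faults are
   reported back to the cpu_loop of the host process.  */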
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */
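
/* Map a TCG MMU index back to the address-space-control value it models.  */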
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
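
/*
 * Handle a TLB miss: translate the virtual address (or real address for
 * MMU_REAL_IDX), check that the result is backed by RAM, and install a
 * TLB entry.  On failure, return false for probe accesses; otherwise
 * deliver the program exception set up by the translation code.
 */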
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot, fail;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!fail &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        fail = 1;
    }

    if (!fail) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    cpu_restore_state(cs, retaddr, true);

    /*
     * The ILC value for code accesses is undefined.  The important
     * thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
     * which would cause do_program_interrupt to attempt to read from
     * env->psw.addr again.  Cf. the condition in trigger_page_fault,
     * which is not universally applied.
     *
     * ??? If we remove ILEN_AUTO, by moving the computation of ILEN
     * into cpu_restore_state, then we may remove this entirely.
     */
    if (access_type == MMU_INST_FETCH) {
        env->int_pgm_ilen = 2;
    }

    cpu_loop_exit(cs);
}
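
/*
 * Deliver a pending program interrupt via the lowcore: store the
 * interruption code, ILC and old PSW, then load the program new PSW.
 */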
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
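
/* Deliver a SUPERVISOR CALL interrupt via the svc old/new PSWs.  */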
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
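
/*
 * Deliver the highest-priority pending and enabled external interruption
 * (emergency signal, external call, clock comparator, CPU timer or
 * service signal) via the external old/new PSWs.
 */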
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
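
/* Deliver a floating I/O interrupt dequeued from the FLIC.  */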
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}
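
/* Machine-check extended save area, used to store the vector registers;
   1024 bytes long and 1024-byte aligned.  */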
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
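
/* Store all 32 vector registers into the extended save area at the given
   guest-physical address; returns -EFAULT if it cannot be mapped.  */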
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}
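
/*
 * Deliver a channel-report machine-check interrupt: store the registers
 * into the lowcore save areas, try to store the vector registers into
 * the extended save area, store the MCIC and swap the PSWs.
 */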
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
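
/*
 * Top-level interrupt delivery.  Pending interrupts are delivered in
 * priority order (machine check, external, I/O, restart, stop); the
 * loop repeats until no further interrupt is deliverable.
 */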
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
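
/* Called by the TCG main loop when CPU_INTERRUPT_HARD is pending.  */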
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered.  Go back to sleep.  */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
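
/* Translate a QEMU watchpoint hit into a PER storage-alteration event.  */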
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */