s390x: move tcg_s390_program_interrupt() into TCG code and mark it noreturn
[qemu/ar7.git] / target / s390x / excp_helper.c
blob5dab3387c30a2e8fb8e89698cfe955e37431bfba
1 /*
2 * s390x exception / interrupt helpers
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "qemu/timer.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "hw/s390x/ioinst.h"
28 #include "exec/address-spaces.h"
29 #include "tcg_s390x.h"
30 #ifndef CONFIG_USER_ONLY
31 #include "sysemu/sysemu.h"
32 #include "hw/s390x/s390_flic.h"
33 #endif
/*
 * Debug tracing for this file.  Define DEBUG_S390 to enable DPRINTF();
 * additionally define DEBUG_S390_STDOUT to mirror the output on stderr
 * (and still send it to the qemu log when a separate log file is used).
 */
/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { \
        fprintf(stderr, fmt, ## __VA_ARGS__); \
        if (qemu_log_separate()) { \
            qemu_log(fmt, ## __VA_ARGS__); \
        } \
    } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
/* Tracing disabled: expands to a harmless empty statement. */
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
52 void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
53 int ilen, uintptr_t ra)
55 CPUState *cs = CPU(s390_env_get_cpu(env));
57 cpu_restore_state(cs, ra, true);
58 qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
59 env->psw.addr);
60 trigger_pgm_exception(env, code, ilen);
61 cpu_loop_exit(cs);
64 #if defined(CONFIG_USER_ONLY)
66 void s390_cpu_do_interrupt(CPUState *cs)
68 cs->exception_index = -1;
71 int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
72 int rw, int mmu_idx)
74 S390CPU *cpu = S390_CPU(cs);
76 trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
77 /* On real machines this value is dropped into LowMem. Since this
78 is userland, simply put this someplace that cpu_loop can find it. */
79 cpu->env.__excp_addr = address;
80 return 1;
83 #else /* !CONFIG_USER_ONLY */
85 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
87 switch (mmu_idx) {
88 case MMU_PRIMARY_IDX:
89 return PSW_ASC_PRIMARY;
90 case MMU_SECONDARY_IDX:
91 return PSW_ASC_SECONDARY;
92 case MMU_HOME_IDX:
93 return PSW_ASC_HOME;
94 default:
95 abort();
99 int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
100 int rw, int mmu_idx)
102 S390CPU *cpu = S390_CPU(cs);
103 CPUS390XState *env = &cpu->env;
104 target_ulong vaddr, raddr;
105 uint64_t asc;
106 int prot;
108 DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
109 __func__, orig_vaddr, rw, mmu_idx);
111 vaddr = orig_vaddr;
113 if (mmu_idx < MMU_REAL_IDX) {
114 asc = cpu_mmu_idx_to_asc(mmu_idx);
115 /* 31-Bit mode */
116 if (!(env->psw.mask & PSW_MASK_64)) {
117 vaddr &= 0x7fffffff;
119 if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
120 return 1;
122 } else if (mmu_idx == MMU_REAL_IDX) {
123 /* 31-Bit mode */
124 if (!(env->psw.mask & PSW_MASK_64)) {
125 vaddr &= 0x7fffffff;
127 if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
128 return 1;
130 } else {
131 abort();
134 /* check out of RAM access */
135 if (!address_space_access_valid(&address_space_memory, raddr,
136 TARGET_PAGE_SIZE, rw,
137 MEMTXATTRS_UNSPECIFIED)) {
138 DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
139 (uint64_t)raddr, (uint64_t)ram_size);
140 trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
141 return 1;
144 qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
145 __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
147 tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
148 mmu_idx, TARGET_PAGE_SIZE);
150 return 0;
153 static void do_program_interrupt(CPUS390XState *env)
155 uint64_t mask, addr;
156 LowCore *lowcore;
157 int ilen = env->int_pgm_ilen;
159 if (ilen == ILEN_AUTO) {
160 ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
162 assert(ilen == 2 || ilen == 4 || ilen == 6);
164 switch (env->int_pgm_code) {
165 case PGM_PER:
166 if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
167 break;
169 /* FALL THROUGH */
170 case PGM_OPERATION:
171 case PGM_PRIVILEGED:
172 case PGM_EXECUTE:
173 case PGM_PROTECTION:
174 case PGM_ADDRESSING:
175 case PGM_SPECIFICATION:
176 case PGM_DATA:
177 case PGM_FIXPT_OVERFLOW:
178 case PGM_FIXPT_DIVIDE:
179 case PGM_DEC_OVERFLOW:
180 case PGM_DEC_DIVIDE:
181 case PGM_HFP_EXP_OVERFLOW:
182 case PGM_HFP_EXP_UNDERFLOW:
183 case PGM_HFP_SIGNIFICANCE:
184 case PGM_HFP_DIVIDE:
185 case PGM_TRANS_SPEC:
186 case PGM_SPECIAL_OP:
187 case PGM_OPERAND:
188 case PGM_HFP_SQRT:
189 case PGM_PC_TRANS_SPEC:
190 case PGM_ALET_SPEC:
191 case PGM_MONITOR:
192 /* advance the PSW if our exception is not nullifying */
193 env->psw.addr += ilen;
194 break;
197 qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
198 __func__, env->int_pgm_code, ilen);
200 lowcore = cpu_map_lowcore(env);
202 /* Signal PER events with the exception. */
203 if (env->per_perc_atmid) {
204 env->int_pgm_code |= PGM_PER;
205 lowcore->per_address = cpu_to_be64(env->per_address);
206 lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
207 env->per_perc_atmid = 0;
210 lowcore->pgm_ilen = cpu_to_be16(ilen);
211 lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
212 lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
213 lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
214 mask = be64_to_cpu(lowcore->program_new_psw.mask);
215 addr = be64_to_cpu(lowcore->program_new_psw.addr);
216 lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);
218 cpu_unmap_lowcore(lowcore);
220 DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
221 env->int_pgm_code, ilen, env->psw.mask,
222 env->psw.addr);
224 load_psw(env, mask, addr);
227 static void do_svc_interrupt(CPUS390XState *env)
229 uint64_t mask, addr;
230 LowCore *lowcore;
232 lowcore = cpu_map_lowcore(env);
234 lowcore->svc_code = cpu_to_be16(env->int_svc_code);
235 lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
236 lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
237 lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
238 mask = be64_to_cpu(lowcore->svc_new_psw.mask);
239 addr = be64_to_cpu(lowcore->svc_new_psw.addr);
241 cpu_unmap_lowcore(lowcore);
243 load_psw(env, mask, addr);
245 /* When a PER event is pending, the PER exception has to happen
246 immediately after the SERVICE CALL one. */
247 if (env->per_perc_atmid) {
248 env->int_pgm_code = PGM_PER;
249 env->int_pgm_ilen = env->int_svc_ilen;
250 do_program_interrupt(env);
254 #define VIRTIO_SUBCODE_64 0x0D00
256 static void do_ext_interrupt(CPUS390XState *env)
258 QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
259 S390CPU *cpu = s390_env_get_cpu(env);
260 uint64_t mask, addr;
261 uint16_t cpu_addr;
262 LowCore *lowcore;
264 if (!(env->psw.mask & PSW_MASK_EXT)) {
265 cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
268 lowcore = cpu_map_lowcore(env);
270 if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
271 (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
272 lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
273 cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
274 g_assert(cpu_addr < S390_MAX_CPUS);
275 lowcore->cpu_addr = cpu_to_be16(cpu_addr);
276 clear_bit(cpu_addr, env->emergency_signals);
277 if (bitmap_empty(env->emergency_signals, max_cpus)) {
278 env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
280 } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
281 (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
282 lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
283 lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
284 env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
285 } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
286 (env->cregs[0] & CR0_CKC_SC)) {
287 lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
288 lowcore->cpu_addr = 0;
289 env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
290 } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
291 (env->cregs[0] & CR0_CPU_TIMER_SC)) {
292 lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
293 lowcore->cpu_addr = 0;
294 env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
295 } else if (qemu_s390_flic_has_service(flic) &&
296 (env->cregs[0] & CR0_SERVICE_SC)) {
297 uint32_t param;
299 param = qemu_s390_flic_dequeue_service(flic);
300 lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
301 lowcore->ext_params = cpu_to_be32(param);
302 lowcore->cpu_addr = 0;
303 } else {
304 g_assert_not_reached();
307 mask = be64_to_cpu(lowcore->external_new_psw.mask);
308 addr = be64_to_cpu(lowcore->external_new_psw.addr);
309 lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
310 lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
312 cpu_unmap_lowcore(lowcore);
314 DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
315 env->psw.mask, env->psw.addr);
317 load_psw(env, mask, addr);
320 static void do_io_interrupt(CPUS390XState *env)
322 QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
323 uint64_t mask, addr;
324 QEMUS390FlicIO *io;
325 LowCore *lowcore;
327 g_assert(env->psw.mask & PSW_MASK_IO);
328 io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
329 g_assert(io);
331 lowcore = cpu_map_lowcore(env);
333 lowcore->subchannel_id = cpu_to_be16(io->id);
334 lowcore->subchannel_nr = cpu_to_be16(io->nr);
335 lowcore->io_int_parm = cpu_to_be32(io->parm);
336 lowcore->io_int_word = cpu_to_be32(io->word);
337 lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
338 lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
339 mask = be64_to_cpu(lowcore->io_new_psw.mask);
340 addr = be64_to_cpu(lowcore->io_new_psw.addr);
342 cpu_unmap_lowcore(lowcore);
343 g_free(io);
345 DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask,
346 env->psw.addr);
347 load_psw(env, mask, addr);
350 static void do_mchk_interrupt(CPUS390XState *env)
352 QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
353 uint64_t mask, addr;
354 LowCore *lowcore;
355 int i;
357 /* for now we only support channel report machine checks (floating) */
358 g_assert(env->psw.mask & PSW_MASK_MCHECK);
359 g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);
361 qemu_s390_flic_dequeue_crw_mchk(flic);
363 lowcore = cpu_map_lowcore(env);
365 /* we are always in z/Architecture mode */
366 lowcore->ar_access_id = 1;
368 for (i = 0; i < 16; i++) {
369 lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
370 lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
371 lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
372 lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
374 lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
375 lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
376 lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
377 lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
378 lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);
380 lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
381 lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
382 lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
383 mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
384 addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
386 cpu_unmap_lowcore(lowcore);
388 DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
389 env->psw.mask, env->psw.addr);
391 load_psw(env, mask, addr);
394 void s390_cpu_do_interrupt(CPUState *cs)
396 QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
397 S390CPU *cpu = S390_CPU(cs);
398 CPUS390XState *env = &cpu->env;
399 bool stopped = false;
401 qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
402 __func__, cs->exception_index, env->psw.addr);
404 try_deliver:
405 /* handle machine checks */
406 if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
407 cs->exception_index = EXCP_MCHK;
409 /* handle external interrupts */
410 if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
411 cs->exception_index = EXCP_EXT;
413 /* handle I/O interrupts */
414 if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
415 cs->exception_index = EXCP_IO;
417 /* RESTART interrupt */
418 if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
419 cs->exception_index = EXCP_RESTART;
421 /* STOP interrupt has least priority */
422 if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
423 cs->exception_index = EXCP_STOP;
426 switch (cs->exception_index) {
427 case EXCP_PGM:
428 do_program_interrupt(env);
429 break;
430 case EXCP_SVC:
431 do_svc_interrupt(env);
432 break;
433 case EXCP_EXT:
434 do_ext_interrupt(env);
435 break;
436 case EXCP_IO:
437 do_io_interrupt(env);
438 break;
439 case EXCP_MCHK:
440 do_mchk_interrupt(env);
441 break;
442 case EXCP_RESTART:
443 do_restart_interrupt(env);
444 break;
445 case EXCP_STOP:
446 do_stop_interrupt(env);
447 stopped = true;
448 break;
451 if (cs->exception_index != -1 && !stopped) {
452 /* check if there are more pending interrupts to deliver */
453 cs->exception_index = -1;
454 goto try_deliver;
456 cs->exception_index = -1;
458 /* we might still have pending interrupts, but not deliverable */
459 if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
460 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
463 /* WAIT PSW during interrupt injection or STOP interrupt */
464 if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
465 /* don't trigger a cpu_loop_exit(), use an interrupt instead */
466 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
467 } else if (cs->halted) {
468 /* unhalt if we had a WAIT PSW somehwere in our injection chain */
469 s390_cpu_unhalt(cpu);
473 bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
475 if (interrupt_request & CPU_INTERRUPT_HARD) {
476 S390CPU *cpu = S390_CPU(cs);
477 CPUS390XState *env = &cpu->env;
479 if (env->ex_value) {
480 /* Execution of the target insn is indivisible from
481 the parent EXECUTE insn. */
482 return false;
484 if (s390_cpu_has_int(cpu)) {
485 s390_cpu_do_interrupt(cs);
486 return true;
488 if (env->psw.mask & PSW_MASK_WAIT) {
489 /* Woken up because of a floating interrupt but it has already
490 * been delivered. Go back to sleep. */
491 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
494 return false;
497 void s390x_cpu_debug_excp_handler(CPUState *cs)
499 S390CPU *cpu = S390_CPU(cs);
500 CPUS390XState *env = &cpu->env;
501 CPUWatchpoint *wp_hit = cs->watchpoint_hit;
503 if (wp_hit && wp_hit->flags & BP_CPU) {
504 /* FIXME: When the storage-alteration-space control bit is set,
505 the exception should only be triggered if the memory access
506 is done using an address space with the storage-alteration-event
507 bit set. We have no way to detect that with the current
508 watchpoint code. */
509 cs->watchpoint_hit = NULL;
511 env->per_address = env->psw.addr;
512 env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
513 /* FIXME: We currently no way to detect the address space used
514 to trigger the watchpoint. For now just consider it is the
515 current default ASC. This turn to be true except when MVCP
516 and MVCS instrutions are not used. */
517 env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;
519 /* Remove all watchpoints to re-execute the code. A PER exception
520 will be triggered, it will call load_psw which will recompute
521 the watchpoints. */
522 cpu_watchpoint_remove_all(cs, BP_CPU);
523 cpu_loop_exit_noexc(cs);
527 /* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
528 this is only for the atomic operations, for which we want to raise a
529 specification exception. */
530 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
531 MMUAccessType access_type,
532 int mmu_idx, uintptr_t retaddr)
534 S390CPU *cpu = S390_CPU(cs);
535 CPUS390XState *env = &cpu->env;
537 s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
540 #endif /* CONFIG_USER_ONLY */