s390x/tcg: tolerate wrong wakeups due to floating interrupts
[qemu/ar7.git] target/s390x/excp_helper.c
blob 23447af942c723e5e14813fa71e91f344db20374

/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

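/*
 * Map a QEMU MMU index to the architectural address-space control (ASC)
 * that it models; the translation code below uses the ASC to select the
 * address space.
 */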
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

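/*
 * Fill the softmmu TLB: translate a virtual (or, for MMU_REAL_IDX, real)
 * address and install the resulting mapping, or trigger a program
 * exception and return 1 on failure.
 */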
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

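/*
 * Deliver a program interrupt: advance the PSW past the instruction for
 * non-nullifying exceptions, store the interruption code, ILC and old PSW
 * into the lowcore, merge in a pending PER event, and load the
 * program-new PSW.
 */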
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

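/*
 * Deliver a SUPERVISOR CALL interrupt: the old PSW stored in the lowcore
 * is advanced past the SVC instruction, and a pending PER event is
 * delivered immediately behind it.
 */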
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

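/*
 * Deliver one external interrupt: pending conditions are checked in a
 * fixed order (emergency signal, external call, clock comparator, CPU
 * timer, service signal), each gated by its subclass mask in CR0, and
 * exactly one condition is consumed per call.
 */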
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        /*
         * FIXME: floating IRQs should be considered by all CPUs and
         * should not get cleared by CPU reset.
         */
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(env->service_param);
        lowcore->cpu_addr = 0;
        env->service_param = 0;
        env->pending_int &= ~INTERRUPT_EXT_SERVICE;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

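/*
 * Deliver an I/O interrupt: scan the per-ISC queues, deliver the first
 * entry whose ISC is enabled in CR6, and clear INTERRUPT_IO only once no
 * undelivered entries remain.
 */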
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

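/*
 * Deliver a machine check: only floating channel-report machine checks
 * are supported, so the register save areas in the lowcore are filled
 * from the current CPU state and a validity MCIC is stored before the
 * mcck-new PSW is loaded.
 */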
static void do_mchk_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    g_assert(env->pending_int & INTERRUPT_MCHK);
    env->pending_int &= ~INTERRUPT_MCHK;

    lowcore = cpu_map_lowcore(env);

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

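/*
 * Deliver all deliverable interrupts, highest priority first: machine
 * check, external, I/O, restart, stop.  Program and SVC exceptions enter
 * with cs->exception_index already set; after each delivery we loop back
 * to pick up any further pending interrupts.
 */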
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

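/*
 * cpu_exec() interrupt hook.  With TCG, a floating interrupt may wake up
 * a waiting CPU that cannot deliver it because another CPU took it first;
 * if the PSW WAIT bit is still set and nothing is deliverable, go back to
 * sleep instead of executing code, tolerating such wrong wakeups.
 */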
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */