s390x/tcg: take care of external interrupt subclasses
target/s390x/excp_helper.c
/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
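
/* In user-only mode there is no lowcore to swap PSWs through; faults are
   reported back to cpu_loop, so interrupt "delivery" only has to clear the
   pending exception. */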
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
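
/* Fill the QEMU TLB for one page: translate the access via DAT for the
   primary/secondary/home MMU indexes, or via real-address translation for
   MMU_REAL_IDX, check that the resulting absolute address is actually
   backed by memory, and install the mapping. */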
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
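
/* Deliver a program interrupt: advance the PSW past the instruction for
   non-nullifying exception codes, fold a pending PER event into the
   interruption code, store the old PSW and interruption parameters into
   the lowcore and load the program-new PSW. */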
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
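
/* Deliver a SUPERVISOR CALL interrupt: store the SVC interruption code,
   instruction length and old PSW into the lowcore and load the
   svc-new PSW. */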
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
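
/* Deliver exactly one pending external interrupt.  A subclass is only
   deliverable if the corresponding subclass-mask bit in control register 0
   is set; the subclasses are checked in the priority order implemented
   below (emergency signal, external call, clock comparator, CPU timer,
   service signal). */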
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        /*
         * FIXME: floating IRQs should be considered by all CPUs and
         * should not get cleared by CPU reset.
         */
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(env->service_param);
        lowcore->cpu_addr = 0;
        env->service_param = 0;
        env->pending_int &= ~INTERRUPT_EXT_SERVICE;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
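
/* Deliver at most one pending I/O interrupt.  The per-ISC queues are
   scanned in ascending ISC order and an interrupt is only deliverable if
   its ISC bit is enabled in control register 6; the INTERRUPT_IO pending
   flag is only cleared once no queued interrupts remain. */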
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
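
/* Deliver a machine-check interrupt (only the CRW-pending type 1 is
   handled): store the architected register save areas and a fixed
   machine-check interruption code into the lowcore, then load the
   mcck-new PSW. */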
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
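
/* Main interrupt delivery entry point.  If no exception is set yet,
   pending interruptions are considered in the order machine check,
   external, I/O; program and SVC exceptions arrive here with
   cs->exception_index already set. */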
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
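
/* Called from the TCG main loop when CPU_INTERRUPT_HARD is pending.
   Returns true if an interrupt was actually delivered. */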
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
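
/* PER support: translate a hit CPU watchpoint into a pending
   storage-alteration PER event, then drop all CPU watchpoints and restart
   execution; the eventual PER exception will recompute them via
   load_psw. */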
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */