s390x/tcg: turn INTERRUPT_EXT into a mask
[qemu/ar7.git] target/s390x/excp_helper.c
/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}
#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
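/* Fill the TCG TLB for a guest access.  Returns 0 once the page has been
   entered into the TLB, or 1 if translation failed and a program
   exception has already been triggered. */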
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
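/* Deliver a program interrupt via the lowcore: advance the PSW for
   non-nullifying exceptions, store the old PSW, interruption code and
   ILC, merge in any pending PER event, and load the program-new PSW. */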
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
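/* Deliver a SUPERVISOR CALL interrupt.  The stored old PSW points past
   the SVC instruction; a pending PER event is delivered immediately
   afterwards as a program interrupt. */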
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
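/* Deliver one pending external interrupt, highest priority first:
   clock comparator, then CPU timer, then service signal.  Each
   pending_int mask bit is cleared once no interrupt of that class
   remains queued. */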
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if (env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if (env->pending_int & INTERRUPT_EXT_CPU_TIMER) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (env->pending_int & INTERRUPT_EXT_SERVICE) {
        g_assert(env->ext_index >= 0);
        /*
         * FIXME: floating IRQs should be considered by all CPUs and
         * should not get cleared by CPU reset.
         */
        q = &env->ext_queue[env->ext_index];
        lowcore->ext_int_code = cpu_to_be16(q->code);
        lowcore->ext_params = cpu_to_be32(q->param);
        lowcore->ext_params2 = cpu_to_be64(q->param64);
        lowcore->cpu_addr = cpu_to_be16(env->core_id | VIRTIO_SUBCODE_64);
        env->ext_index--;
        if (env->ext_index == -1) {
            env->pending_int &= ~INTERRUPT_EXT_SERVICE;
        }
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
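/* Deliver the first pending I/O interrupt whose interruption subclass
   is enabled in control register 6.  INTERRUPT_IO is only cleared once
   all per-ISC queues have been drained. */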
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
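/* Deliver a machine-check interrupt: store the architected register
   save areas and a fixed interruption code into the lowcore, then load
   the mcck-new PSW.  Only CRW-pending conditions (type 1) are handled. */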
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
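/* Main interrupt/exception dispatcher.  If no exception is pending yet,
   select the highest-priority enabled interrupt class: machine check,
   then external, then I/O. */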
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1 &&
        (env->pending_int & INTERRUPT_EXT)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
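/* Called by the TCG main loop when CPU_INTERRUPT_HARD is set.  Returns
   true when an interrupt was delivered, so that execution resumes at
   the new PSW. */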
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used. */
        env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception. */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */