/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ## __VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
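
/* In user-only mode, exceptions are reported back to cpu_loop; there is
   nothing to deliver here beyond clearing the exception index. */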
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem. Since this
       is userland, simply put this someplace that cpu_loop can find it. */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */
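
/* Map a QEMU MMU index to the address-space-control (ASC) value that the
   DAT code expects; the real-address index is handled by the caller. */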
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
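
/* Resolve a guest virtual address: translate it through DAT (or through
   real-address translation for MMU_REAL_IDX), check that the resulting
   absolute address is backed by memory, and install the mapping in the
   TLB. Returns 0 on success and 1 if a program exception was raised. */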
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check for out-of-RAM accesses */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
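
/* Deliver a program interruption: store the interruption code and the
   instruction length into the lowcore, save the old PSW, and load the
   program-new PSW. For non-nullifying exceptions the PSW is first
   advanced past the faulting instruction. */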
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
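
/* Deliver a SUPERVISOR CALL interruption through the SVC old/new PSW
   pair in the lowcore. A PER event recognized on the SVC instruction
   is delivered immediately afterwards. */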
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
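
/* Deliver the most recently queued external interruption: store the
   interruption code and parameters into the lowcore and load the
   external-new PSW. INTERRUPT_EXT stays pending while the queue is
   not empty. */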
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->core_id | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
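
/* Scan the per-ISC interruption queues and deliver the first entry
   that is enabled by the ISC mask in control register 6. INTERRUPT_IO
   stays pending while any queue still holds entries. */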
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
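
/* Deliver a machine-check interruption: store the architected register
   save areas and a fixed interruption code into the lowcore, then load
   the mcck-new PSW. Only type-1 (channel-report-pending) machine
   checks are supported, and they are delivered only when CRW machine
   checks are enabled in control register 14. */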
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
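
/* Main interrupt dispatcher: turn pending machine-check, external and
   I/O interrupts into an exception class when the PSW mask enables
   them, then hand the selected class to its delivery helper. */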
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
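
/* Entry point from the generic main loop for hard interrupts. An
   interrupt is never taken between an EXECUTE instruction and its
   target instruction, since the two must be indivisible. */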
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
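
/* Turn a hit CPU watchpoint into a PER storage-alteration event and
   restart execution so that the PER exception is actually raised. */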
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint. For now, assume it is the
           current default ASC. This turns out to be true except when
           the MVCP and MVCS instructions are used. */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code. A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception. */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */