target/s390x/excp_helper.c

/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem. Since this
       is userland, simply put this someplace that cpu_loop can find it. */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */
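
/* Fill the TCG TLB for a guest access, or raise an exception. Returns 0
   when a valid translation was entered into the TLB, and 1 when a program
   exception has been triggered instead. */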
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
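
/* Deliver a program interruption: store the interruption code, the
   instruction length and the old PSW into the lowcore, then load the
   program-new PSW. */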
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
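
/* Deliver a SUPERVISOR CALL interruption via the lowcore, followed
   immediately by a pending PER event, if any. */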
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
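
/* Deliver the next queued external interruption. The queue is consumed
   from the top; the INTERRUPT_EXT pending bit is cleared once the queue
   is empty. */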
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
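
/* Deliver one I/O interruption, honouring the per-ISC subclass masks in
   CR6. INTERRUPT_IO is only cleared when no queue, enabled or not, has
   entries left. */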
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
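
/* Deliver a channel-report-pending machine check: save the architected
   register state into the lowcore, store a fixed machine-check
   interruption code and load the mcck-new PSW. */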
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
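
/* If no exception is already pending, convert pending interrupt bits into
   an exception index, checking in priority order: machine check, then
   external, then I/O; finally dispatch to the matching helper. */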
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
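
/* TCG hook for CPU_INTERRUPT_HARD: deliver a pending interrupt unless we
   are between an EXECUTE and its target instruction; the PSW external
   mask is used as the enabled check. Returns true when an interrupt was
   delivered. */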
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
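
/* Turn a QEMU watchpoint hit into a PER storage-alteration event. */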
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint. For now just assume it is the current
           default ASC. This holds except when the MVCP and MVCS
           instructions are used. */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code. A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception. */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */