target/s390x/excp_helper.c (qemu/ar7.git, blob dfee2211114d57bdc4480423f7ce0df0792332d6)

/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif

/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */
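
/*
 * Map a softmmu MMU index back to the PSW address-space-control (ASC)
 * value that selects the corresponding address space for translation.
 */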
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
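
/*
 * Translate a guest virtual address for the softmmu slow path and, on
 * success, install the mapping in the TLB.  Returns 0 if the page was
 * translated and entered into the TLB, 1 if a program exception was
 * triggered instead (the caller then delivers it).
 */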
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
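
/*
 * Deliver a program interrupt: store the old PSW and the interruption
 * parameters (code, instruction length, optional PER data) into the
 * lowcore, then load the program-new PSW.
 */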
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
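
/*
 * Deliver a supervisor-call interrupt.  The old PSW saved in the
 * lowcore already points past the SVC instruction, so execution
 * resumes after it when the OS returns via the stored PSW.
 */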
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
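
/*
 * Deliver the highest-priority pending external interrupt that is
 * enabled by its subclass mask in CR0: emergency signal, external
 * call, clock comparator, CPU timer, then service signal.
 */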
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
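
/*
 * Deliver an I/O interrupt: dequeue a pending I/O interruption that is
 * enabled by the subclass mask in CR6 and expose its subchannel
 * identification in the lowcore before swapping the PSWs.
 */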
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask,
            env->psw.addr);
    load_psw(env, mask, addr);
}
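
/*
 * Deliver a channel-report machine-check interrupt: save the
 * architected register state into the lowcore save areas and store a
 * machine-check-interruption code before swapping the PSWs.
 */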
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
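
/*
 * Deliver all deliverable interrupts, highest priority first (machine
 * check, external, I/O, restart, stop), looping until nothing further
 * can be delivered.  Program and SVC exceptions arrive here with
 * cs->exception_index already set.
 */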
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
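
/*
 * Entry point from the TCG main loop: deliver a pending hard interrupt
 * if one is deliverable.  Returns true if an interrupt was delivered.
 */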
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered.  Go back to sleep.  */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
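
/*
 * Turn a TCG watchpoint hit into a PER storage-alteration event:
 * record the PER code and ATMID, remove the CPU watchpoints, and
 * re-execute the instruction so the PER exception can be raised.
 */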
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */