/*
 *  s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif
/* #define DEBUG_S390 */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */
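
/* Map a QEMU MMU index onto the address-space control (ASC) it models. */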
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
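
/*
 * Resolve a guest virtual address for the TCG slow path: translate it
 * through DAT (or through real-address translation for MMU_REAL_IDX),
 * check that the resulting real address is actually backed by memory,
 * and install the mapping in the TLB.  Returns 0 on success; on failure
 * it returns 1 with a program exception already triggered.
 */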
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    vaddr = orig_vaddr;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
            return 1;
        }
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
            return 1;
        }
    } else {
        abort();
    }

    /* check out of RAM access */
    if (!address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, rw,
                                    MEMTXATTRS_UNSPECIFIED)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr & TARGET_PAGE_MASK, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
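
/*
 * Deliver a program interrupt: for non-nullifying conditions the PSW is
 * first advanced past the offending instruction, then the interrupt
 * code, instruction length and old PSW are stored in the lowcore
 * (folding in a pending PER event, if any) before the program-new PSW
 * is loaded.
 */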
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
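
/*
 * Deliver a SUPERVISOR CALL interrupt through the lowcore SVC old/new
 * PSW pair; a pending PER event is delivered as a program interrupt
 * right afterwards.
 */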
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
#define VIRTIO_SUBCODE_64 0x0D00
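
/*
 * Deliver the pending external interrupt with the highest priority that
 * is also enabled by its CR0 subclass mask: emergency signal, external
 * call, clock comparator, CPU timer, then service signal.  The interrupt
 * code and any associated parameter (source CPU address or service
 * parameter) are stored in the lowcore before the external-new PSW is
 * loaded.
 */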
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
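
/*
 * Deliver one I/O interrupt dequeued from the FLIC (subject to the
 * subclasses enabled in CR6), storing the subchannel identification and
 * interruption parameters in the lowcore before loading the I/O-new PSW.
 */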
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask,
            env->psw.addr);
    load_psw(env, mask, addr);
}
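
/*
 * Deliver a channel-report machine check, the only kind supported here:
 * the architected register state is saved in the lowcore save areas and
 * a validity MCIC with the channel-report-pending bit is stored before
 * the mcck-new PSW is loaded.
 */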
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
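
/*
 * Top-level interrupt delivery.  A program or SVC exception arrives via
 * cs->exception_index; otherwise the highest-priority deliverable
 * interrupt class is picked, from machine check down to STOP.  Delivery
 * is retried in a loop because loading a new PSW can make further
 * pending interrupts deliverable.
 */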
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
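
/*
 * Entry point from the main execution loop (the cpu_exec_interrupt hook)
 * when CPU_INTERRUPT_HARD is pending: deliver an interrupt, unless we
 * are between an EXECUTE and its target instruction.  Returns true if
 * an interrupt was delivered.
 */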
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered.  Go back to sleep.  */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
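
/*
 * Debug exception handler: convert a QEMU watchpoint hit into a PER
 * storage-alteration event and restart execution, so the access is
 * re-executed without the watchpoints armed.
 */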
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now, assume it is the current
           default ASC; this holds except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */