/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

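/*
 * The two timer callbacks below run from QEMU's timer subsystem.  They
 * only mark the corresponding interrupt as pending and kick the CPU;
 * actual delivery happens later in s390_cpu_do_interrupt().
 */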
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

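/*
 * Create a CPU from a "model[,feature,...]" string.  The feature part is
 * split off at the first ',' and handed to the CPU class' parse_features
 * hook; features are parsed only once, for the first CPU created.
 */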
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    static bool features_parsed;
    char *name, *features;
    const char *typename;
    ObjectClass *oc;
    CPUClass *cc;

    name = g_strdup(cpu_model);
    features = strchr(name, ',');
    if (features) {
        features[0] = 0;
        features++;
    }

    oc = cpu_class_by_name(TYPE_S390_CPU, name);
    if (!oc) {
        error_setg(errp, "Unknown CPU definition '%s'", name);
        g_free(name);
        return NULL;
    }
    typename = object_class_get_name(oc);

    if (!features_parsed) {
        features_parsed = true;
        cc = CPU_CLASS(oc);
        cc->parse_features(typename, features, errp);
    }
    g_free(name);

    if (*errp) {
        return NULL;
    }
    return S390_CPU(CPU(object_new(typename)));
}

S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

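/*
 * Handle a softmmu TLB miss.  Returns 0 if the page was translated and
 * entered into the QEMU TLB, or 1 if translation failed, in which case a
 * program exception has already been queued via trigger_pgm_exception().
 */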
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

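/*
 * Address translation helpers for debug accesses (gdbstub and monitor).
 * These walk the translation tables without raising guest exceptions.
 */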
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

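/*
 * Install a new PSW.  Under TCG the condition code is re-extracted from
 * PSW mask bits 18-19 (mask >> 44).  A change of PSW_MASK_PER forces the
 * PER watchpoints to be recomputed, and loading a wait-state PSW halts
 * the CPU; if it was the last running CPU, a guest shutdown is requested.
 */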
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
#endif
        }
    }
}

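/*
 * Return the architectural PSW mask.  TCG evaluates the condition code
 * lazily from cc_op/cc_src/cc_dst/cc_vr, so it has to be folded back into
 * mask bits 18-19 before the PSW can be stored.
 */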
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

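/*
 * Map the lowcore (the 8K prefix area at env->psa) for direct access.
 * All interrupt delivery goes through this mapping: the old PSW is stored
 * to, and the new PSW fetched from, fixed offsets in the lowcore.
 */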
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

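/*
 * Deliver a program interrupt.  ILEN_AUTO means the caller did not know
 * the instruction length, so it is re-derived from the opcode at the
 * current PSW address.  For non-nullifying exceptions the PSW is advanced
 * past the faulting instruction before the old PSW is stored.
 */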
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

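/*
 * Deliver an I/O interrupt.  Interrupts are queued per interruption
 * subclass (ISC), and an ISC is only deliverable if its bit is set in the
 * subclass mask in control register 6.  INTERRUPT_IO is cleared once no
 * deliverable interrupt remains queued.
 */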
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

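/*
 * Deliver a machine check.  Only type 1 (channel report pending) machine
 * checks are implemented, gated by the CRW subclass mask bit in control
 * register 14.  The register save areas in the lowcore are filled in and
 * a fixed machine-check interruption code is stored before the PSW swap.
 */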
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

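/*
 * Main interrupt dispatcher.  If no exception is pending already, the
 * interrupt classes are considered in priority order: machine check,
 * then external, then I/O, each gated by its PSW mask bit.  Pending TOD
 * and CPU timer interrupts are converted into external interrupts with
 * codes 0x1004 and 0x1005 on the fly.
 */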
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn.  */
            return false;
        }
        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
}

#endif /* CONFIG_USER_ONLY */

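/*
 * Dump the CPU state (PSW, condition code, general, floating-point and
 * vector registers, plus control registers for system emulation); this
 * backs 'info registers' and the -d cpu execution log.  A cc_op above 3
 * means the condition code is still in lazy form and is shown symbolically.
 */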
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}