4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
24 #include "exec/gdbstub.h"
25 #include "qemu/timer.h"
26 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "hw/s390x/ioinst.h"
29 #ifndef CONFIG_USER_ONLY
30 #include "sysemu/sysemu.h"
34 //#define DEBUG_S390_STDOUT
37 #ifdef DEBUG_S390_STDOUT
38 #define DPRINTF(fmt, ...) \
39 do { fprintf(stderr, fmt, ## __VA_ARGS__); \
40 if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
42 #define DPRINTF(fmt, ...) \
43 do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
46 #define DPRINTF(fmt, ...) \
51 #ifndef CONFIG_USER_ONLY
52 void s390x_tod_timer(void *opaque
)
54 S390CPU
*cpu
= opaque
;
55 CPUS390XState
*env
= &cpu
->env
;
57 env
->pending_int
|= INTERRUPT_TOD
;
58 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HARD
);
61 void s390x_cpu_timer(void *opaque
)
63 S390CPU
*cpu
= opaque
;
64 CPUS390XState
*env
= &cpu
->env
;
66 env
->pending_int
|= INTERRUPT_CPUTIMER
;
67 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HARD
);
/*
 * cpu_s390x_create: instantiate an S390CPU from a "model[,features]" string.
 * Looks up the CPU class by name, parses the feature list once (guarded by
 * a function-local static flag), and returns a new, not-yet-realized object.
 * Errors are reported through @errp.
 * NOTE(review): several original lines (declarations, braces, error paths)
 * are elided from this excerpt; code below is kept byte-identical.
 */
71 S390CPU
*cpu_s390x_create(const char *cpu_model
, Error
**errp
)
/* parse_features must only run once per process for this frontend */
73 static bool features_parsed
;
74 char *name
, *features
;
/* take a writable copy so the feature list can be split off the name */
79 name
= g_strdup(cpu_model
);
/* features start after the first ',' (if any) */
80 features
= strchr(name
, ',');
86 oc
= cpu_class_by_name(TYPE_S390_CPU
, name
);
/* unknown model name: report via errp (caller decides how to fail) */
88 error_setg(errp
, "Unknown CPU definition \'%s\'", name
);
92 typename
= object_class_get_name(oc
);
94 if (!features_parsed
) {
95 features_parsed
= true;
97 cc
->parse_features(typename
, features
, errp
);
/* instantiate the resolved QOM type and hand it back unrealized */
104 return S390_CPU(CPU(object_new(typename
)));
/*
 * s390x_new_cpu: create, number, and realize an S390CPU.
 * Creates the CPU via cpu_s390x_create(), sets its "id" property to @id,
 * then sets "realized". On any error the local Error is propagated to
 * @errp and the object reference is dropped.
 * NOTE(review): error-check branches between the steps are elided from
 * this excerpt; code below is kept byte-identical.
 */
107 S390CPU
*s390x_new_cpu(const char *cpu_model
, int64_t id
, Error
**errp
)
112 cpu
= cpu_s390x_create(cpu_model
, &err
);
/* assign the CPU address before realize */
117 object_property_set_int(OBJECT(cpu
), id
, "id", &err
);
121 object_property_set_bool(OBJECT(cpu
), true, "realized", &err
);
/* error path: hand the error to the caller and drop our reference */
125 error_propagate(errp
, err
);
126 object_unref(OBJECT(cpu
));
/*
 * cpu_s390x_init: convenience wrapper around s390x_new_cpu() that
 * auto-assigns sequentially increasing CPU ids and reports (rather than
 * propagates) any error.
 * NOTE(review): surrounding braces/return are elided from this excerpt;
 * code below is kept byte-identical.
 */
132 S390CPU
*cpu_s390x_init(const char *cpu_model
)
136 /* Use to track CPU ID for linux-user only */
137 static int64_t next_cpu_id
;
139 cpu
= s390x_new_cpu(cpu_model
, next_cpu_id
++, &err
);
/* best-effort: print the error and fall through (cpu may be NULL) */
141 error_report_err(err
);
146 #if defined(CONFIG_USER_ONLY)
148 void s390_cpu_do_interrupt(CPUState
*cs
)
150 cs
->exception_index
= -1;
/*
 * User-mode MMU fault handler: every fault is an addressing exception.
 * Records EXCP_PGM / PGM_ADDRESSING and stashes the faulting address
 * where the linux-user cpu_loop can retrieve it.
 * NOTE(review): the trailing parameters of the signature and the return
 * statement are elided from this excerpt; code below is kept byte-identical.
 */
153 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr address
,
156 S390CPU
*cpu
= S390_CPU(cs
);
158 cs
->exception_index
= EXCP_PGM
;
159 cpu
->env
.int_pgm_code
= PGM_ADDRESSING
;
160 /* On real machines this value is dropped into LowMem. Since this
161 is userland, simply put this someplace that cpu_loop can find it. */
162 cpu
->env
.__excp_addr
= address
;
166 #else /* !CONFIG_USER_ONLY */
/*
 * System-mode MMU fault handler: translate @orig_vaddr via mmu_translate(),
 * verify the resulting real address is inside guest RAM, and install the
 * mapping with tlb_set_page(). A failed translation or out-of-RAM access
 * raises the appropriate program exception instead.
 * NOTE(review): several original lines (declarations, 31-bit address
 * truncation, returns, braces) are elided from this excerpt; code below is
 * kept byte-identical.
 */
168 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr orig_vaddr
,
171 S390CPU
*cpu
= S390_CPU(cs
);
172 CPUS390XState
*env
= &cpu
->env
;
/* derive the address-space control from the MMU index */
173 uint64_t asc
= cpu_mmu_idx_to_asc(mmu_idx
);
174 target_ulong vaddr
, raddr
;
177 DPRINTF("%s: address 0x%" VADDR_PRIx
" rw %d mmu_idx %d\n",
178 __func__
, orig_vaddr
, rw
, mmu_idx
);
/* work on the page-aligned address from here on */
180 orig_vaddr
&= TARGET_PAGE_MASK
;
/* not in 64-bit mode: addresses are truncated (elided lines) */
184 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
188 if (mmu_translate(env
, vaddr
, rw
, asc
, &raddr
, &prot
, true)) {
189 /* Translation ended in exception */
193 /* check out of RAM access */
/* NOTE(review): '>' lets raddr == ram_size through, which is the first
 * byte past RAM — looks like it should be '>=' ; confirm against the
 * intended semantics of ram_size before changing. */
194 if (raddr
> ram_size
) {
195 DPRINTF("%s: raddr %" PRIx64
" > ram_size %" PRIx64
"\n", __func__
,
196 (uint64_t)raddr
, (uint64_t)ram_size
);
197 trigger_pgm_exception(env
, PGM_ADDRESSING
, ILEN_AUTO
);
201 qemu_log_mask(CPU_LOG_MMU
, "%s: set tlb %" PRIx64
" -> %" PRIx64
" (%x)\n",
202 __func__
, (uint64_t)vaddr
, (uint64_t)raddr
, prot
);
/* install the successful translation into the softmmu TLB */
204 tlb_set_page(cs
, orig_vaddr
, raddr
, prot
,
205 mmu_idx
, TARGET_PAGE_SIZE
);
/*
 * Debug (gdbstub) page translation: translate @vaddr using the current
 * PSW address-space control, without raising guest exceptions
 * (mmu_translate is called with exc=false).
 * NOTE(review): declarations, the 31-bit truncation body, and the return
 * are elided from this excerpt; code below is kept byte-identical.
 */
210 hwaddr
s390_cpu_get_phys_page_debug(CPUState
*cs
, vaddr vaddr
)
212 S390CPU
*cpu
= S390_CPU(cs
);
213 CPUS390XState
*env
= &cpu
->env
;
/* use the ASC currently selected by the PSW */
216 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
219 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
/* last arg false: do not inject exceptions during debug access */
223 if (mmu_translate(env
, vaddr
, MMU_INST_FETCH
, asc
, &raddr
, &prot
, false)) {
/*
 * Debug byte-address translation: translate the page containing @vaddr
 * and re-apply the in-page offset.
 * NOTE(review): declarations, error check, and return are elided from
 * this excerpt; code below is kept byte-identical.
 */
229 hwaddr
s390_cpu_get_phys_addr_debug(CPUState
*cs
, vaddr vaddr
)
234 page
= vaddr
& TARGET_PAGE_MASK
;
235 phys_addr
= cpu_get_phys_page_debug(cs
, page
);
/* add back the offset within the page */
236 phys_addr
+= (vaddr
& ~TARGET_PAGE_MASK
);
/*
 * load_psw: install a new PSW (@mask, @addr) into @env.
 * Also refreshes the cached cc_op from the PSW condition code, recomputes
 * PER watchpoints when the PER bit changed, and handles entry into
 * wait state (including guest shutdown when no CPU remains running).
 * NOTE(review): closing braces / #endif at the end of the function are
 * elided from this excerpt; code below is kept byte-identical.
 */
241 void load_psw(CPUS390XState
*env
, uint64_t mask
, uint64_t addr
)
243 uint64_t old_mask
= env
->psw
.mask
;
245 env
->psw
.addr
= addr
;
246 env
->psw
.mask
= mask
;
/* PSW bits 18-19 hold the condition code; cache it in cc_op */
248 env
->cc_op
= (mask
>> 44) & 3;
/* PER enable bit toggled: watchpoint set must be rebuilt */
251 if ((old_mask
^ mask
) & PSW_MASK_PER
) {
252 s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env
)));
/* wait-state PSW: halt this CPU; if none are left running, shut down */
255 if (mask
& PSW_MASK_WAIT
) {
256 S390CPU
*cpu
= s390_env_get_cpu(env
);
257 if (s390_cpu_halt(cpu
) == 0) {
258 #ifndef CONFIG_USER_ONLY
259 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN
);
/*
 * get_psw_mask: return the architected PSW mask, with the condition-code
 * field refreshed from the lazily-evaluated cc state (calc_cc) and merged
 * into bits 18-19 (shift 44).
 * NOTE(review): the calc_cc() call's final argument, the masking-out of
 * the old CC field, and the return are elided from this excerpt; code
 * below is kept byte-identical.
 */
265 static uint64_t get_psw_mask(CPUS390XState
*env
)
267 uint64_t r
= env
->psw
.mask
;
/* resolve the lazy condition code before exposing the mask */
270 env
->cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
,
/* cc_op must now be a concrete CC value 0..3 */
274 assert(!(env
->cc_op
& ~3));
275 r
|= (uint64_t)env
->cc_op
<< 44;
/*
 * cpu_map_lowcore: map the guest's prefix page (env->psa) for writing and
 * return it as a LowCore pointer; aborts the CPU if the full structure
 * cannot be mapped. Pair with cpu_unmap_lowcore().
 * NOTE(review): the lowcore declaration and the return are elided from
 * this excerpt; code below is kept byte-identical.
 */
281 static LowCore
*cpu_map_lowcore(CPUS390XState
*env
)
283 S390CPU
*cpu
= s390_env_get_cpu(env
);
285 hwaddr len
= sizeof(LowCore
);
/* map at the prefix address, writable (is_write = 1) */
287 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, 1);
/* partial mapping is fatal: lowcore accesses must not be split */
289 if (len
< sizeof(LowCore
)) {
290 cpu_abort(CPU(cpu
), "Could not map lowcore\n");
296 static void cpu_unmap_lowcore(LowCore
*lowcore
)
298 cpu_physical_memory_unmap(lowcore
, sizeof(LowCore
), 1, sizeof(LowCore
));
/*
 * do_restart_interrupt: perform the restart-interrupt PSW swap via the
 * lowcore — save the current PSW to restart_old_psw, load the new PSW
 * from restart_new_psw.
 * NOTE(review): the mask/addr declarations and braces are elided from
 * this excerpt; code below is kept byte-identical.
 */
301 void do_restart_interrupt(CPUS390XState
*env
)
306 lowcore
= cpu_map_lowcore(env
);
/* store the old PSW (big-endian, as architected) */
308 lowcore
->restart_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
309 lowcore
->restart_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
/* fetch the new PSW to switch to */
310 mask
= be64_to_cpu(lowcore
->restart_new_psw
.mask
);
311 addr
= be64_to_cpu(lowcore
->restart_new_psw
.addr
);
313 cpu_unmap_lowcore(lowcore
);
315 load_psw(env
, mask
, addr
);
/*
 * do_program_interrupt: deliver a pending program interrupt.
 * Determines the instruction length (reading the opcode when ILEN_AUTO),
 * advances the PSW for non-nullifying exception classes, records PER
 * information and the interrupt code/ilen in the lowcore, and swaps to
 * the program-new PSW.
 * NOTE(review): many original lines (case bodies, braces, remaining case
 * labels, declarations) are elided from this excerpt; code below is kept
 * byte-identical.
 */
318 static void do_program_interrupt(CPUS390XState
*env
)
322 int ilen
= env
->int_pgm_ilen
;
/* ILEN_AUTO: derive the length from the opcode at the current PSW */
324 if (ilen
== ILEN_AUTO
) {
325 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
/* s390 instructions are 2, 4 or 6 bytes */
327 assert(ilen
== 2 || ilen
== 4 || ilen
== 6);
/* decide, per exception class, whether the PSW must be advanced */
329 switch (env
->int_pgm_code
) {
331 if (env
->per_perc_atmid
& PER_CODE_EVENT_NULLIFICATION
) {
340 case PGM_SPECIFICATION
:
342 case PGM_FIXPT_OVERFLOW
:
343 case PGM_FIXPT_DIVIDE
:
344 case PGM_DEC_OVERFLOW
:
346 case PGM_HFP_EXP_OVERFLOW
:
347 case PGM_HFP_EXP_UNDERFLOW
:
348 case PGM_HFP_SIGNIFICANCE
:
354 case PGM_PC_TRANS_SPEC
:
357 /* advance the PSW if our exception is not nullifying */
358 env
->psw
.addr
+= ilen
;
362 qemu_log_mask(CPU_LOG_INT
, "%s: code=0x%x ilen=%d\n",
363 __func__
, env
->int_pgm_code
, ilen
);
365 lowcore
= cpu_map_lowcore(env
);
367 /* Signal PER events with the exception. */
368 if (env
->per_perc_atmid
) {
369 env
->int_pgm_code
|= PGM_PER
;
370 lowcore
->per_address
= cpu_to_be64(env
->per_address
);
371 lowcore
->per_perc_atmid
= cpu_to_be16(env
->per_perc_atmid
);
/* PER state is consumed by this delivery */
372 env
->per_perc_atmid
= 0;
/* architected big-endian fields in the lowcore */
375 lowcore
->pgm_ilen
= cpu_to_be16(ilen
);
376 lowcore
->pgm_code
= cpu_to_be16(env
->int_pgm_code
);
377 lowcore
->program_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
378 lowcore
->program_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
379 mask
= be64_to_cpu(lowcore
->program_new_psw
.mask
);
380 addr
= be64_to_cpu(lowcore
->program_new_psw
.addr
);
381 lowcore
->per_breaking_event_addr
= cpu_to_be64(env
->gbea
);
383 cpu_unmap_lowcore(lowcore
);
385 DPRINTF("%s: %x %x %" PRIx64
" %" PRIx64
"\n", __func__
,
386 env
->int_pgm_code
, ilen
, env
->psw
.mask
,
/* switch to the program-new PSW */
389 load_psw(env
, mask
, addr
);
/*
 * do_svc_interrupt: deliver a SERVICE CALL (SVC) interrupt — store the
 * SVC code/ilen and the old PSW (advanced past the SVC instruction) into
 * the lowcore, load the svc-new PSW, then chain a PER program interrupt
 * if one is pending.
 * NOTE(review): declarations and braces are elided from this excerpt;
 * code below is kept byte-identical.
 */
392 static void do_svc_interrupt(CPUS390XState
*env
)
397 lowcore
= cpu_map_lowcore(env
);
399 lowcore
->svc_code
= cpu_to_be16(env
->int_svc_code
);
400 lowcore
->svc_ilen
= cpu_to_be16(env
->int_svc_ilen
);
401 lowcore
->svc_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
/* old PSW points past the SVC instruction */
402 lowcore
->svc_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
+ env
->int_svc_ilen
);
403 mask
= be64_to_cpu(lowcore
->svc_new_psw
.mask
);
404 addr
= be64_to_cpu(lowcore
->svc_new_psw
.addr
);
406 cpu_unmap_lowcore(lowcore
);
408 load_psw(env
, mask
, addr
);
410 /* When a PER event is pending, the PER exception has to happen
411 immediately after the SERVICE CALL one. */
412 if (env
->per_perc_atmid
) {
413 env
->int_pgm_code
= PGM_PER
;
414 env
->int_pgm_ilen
= env
->int_svc_ilen
;
415 do_program_interrupt(env
);
419 #define VIRTIO_SUBCODE_64 0x0D00
/*
 * do_ext_interrupt: deliver the next queued external interrupt — pop the
 * head of env->ext_queue, store its code/params and the old PSW into the
 * lowcore, clear INTERRUPT_EXT when the queue drains, and load the
 * external-new PSW. Aborts on inconsistent state (masked externals or a
 * corrupt queue index).
 * NOTE(review): declarations, braces, and the ext_index decrement are
 * elided from this excerpt; code below is kept byte-identical.
 */
421 static void do_ext_interrupt(CPUS390XState
*env
)
423 S390CPU
*cpu
= s390_env_get_cpu(env
);
/* delivering with externals masked is a QEMU bug, not guest behavior */
428 if (!(env
->psw
.mask
& PSW_MASK_EXT
)) {
429 cpu_abort(CPU(cpu
), "Ext int w/o ext mask\n");
432 if (env
->ext_index
< 0 || env
->ext_index
>= MAX_EXT_QUEUE
) {
433 cpu_abort(CPU(cpu
), "Ext queue overrun: %d\n", env
->ext_index
);
/* take the entry at the current queue head */
436 q
= &env
->ext_queue
[env
->ext_index
];
437 lowcore
= cpu_map_lowcore(env
);
439 lowcore
->ext_int_code
= cpu_to_be16(q
->code
);
440 lowcore
->ext_params
= cpu_to_be32(q
->param
);
441 lowcore
->ext_params2
= cpu_to_be64(q
->param64
);
442 lowcore
->external_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
443 lowcore
->external_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
444 lowcore
->cpu_addr
= cpu_to_be16(env
->cpu_num
| VIRTIO_SUBCODE_64
);
445 mask
= be64_to_cpu(lowcore
->external_new_psw
.mask
);
446 addr
= be64_to_cpu(lowcore
->external_new_psw
.addr
);
448 cpu_unmap_lowcore(lowcore
);
/* queue drained (index decremented on an elided line): clear pending bit */
451 if (env
->ext_index
== -1) {
452 env
->pending_int
&= ~INTERRUPT_EXT
;
455 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
456 env
->psw
.mask
, env
->psw
.addr
);
458 load_psw(env
, mask
, addr
);
/*
 * do_io_interrupt: deliver one pending I/O interrupt — scan the per-ISC
 * queues, skip ISCs disabled in CR6, write the subchannel id/nr, parm and
 * interruption word plus the old PSW into the lowcore, pop the queue
 * entry, and load the io-new PSW. INTERRUPT_IO is cleared when nothing
 * remains pending.
 * NOTE(review): declarations, braces, and some loop-control lines are
 * elided from this excerpt; code below is kept byte-identical.
 */
461 static void do_io_interrupt(CPUS390XState
*env
)
463 S390CPU
*cpu
= s390_env_get_cpu(env
);
/* delivering with I/O masked is a QEMU bug, not guest behavior */
470 if (!(env
->psw
.mask
& PSW_MASK_IO
)) {
471 cpu_abort(CPU(cpu
), "I/O int w/o I/O mask\n");
/* walk all interruption subclasses */
474 for (isc
= 0; isc
< ARRAY_SIZE(env
->io_index
); isc
++) {
/* empty queue for this ISC */
477 if (env
->io_index
[isc
] < 0) {
480 if (env
->io_index
[isc
] >= MAX_IO_QUEUE
) {
481 cpu_abort(CPU(cpu
), "I/O queue overrun for isc %d: %d\n",
482 isc
, env
->io_index
[isc
]);
485 q
= &env
->io_queue
[env
->io_index
[isc
]][isc
];
486 isc_bits
= ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q
->word
));
/* CR6 gates which subclasses may be delivered */
487 if (!(env
->cregs
[6] & isc_bits
)) {
495 lowcore
= cpu_map_lowcore(env
);
497 lowcore
->subchannel_id
= cpu_to_be16(q
->id
);
498 lowcore
->subchannel_nr
= cpu_to_be16(q
->nr
);
499 lowcore
->io_int_parm
= cpu_to_be32(q
->parm
);
500 lowcore
->io_int_word
= cpu_to_be32(q
->word
);
501 lowcore
->io_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
502 lowcore
->io_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
503 mask
= be64_to_cpu(lowcore
->io_new_psw
.mask
);
504 addr
= be64_to_cpu(lowcore
->io_new_psw
.addr
);
506 cpu_unmap_lowcore(lowcore
);
/* consume the queue entry */
508 env
->io_index
[isc
]--;
510 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
511 env
->psw
.mask
, env
->psw
.addr
);
512 load_psw(env
, mask
, addr
);
/* more entries remain in this ISC's queue */
514 if (env
->io_index
[isc
] >= 0) {
/* nothing pending anywhere: drop the summary bit */
521 env
->pending_int
&= ~INTERRUPT_IO
;
/*
 * do_mchk_interrupt: deliver a queued machine check — validate state,
 * save the full register context (FPRs, GPRs, ARs, CRs, prefix, FPC,
 * TOD programmable reg, CPU timer, clock comparator) into the lowcore
 * save areas, store a fixed machine-check interruption code, and swap to
 * the mcck-new PSW.
 * NOTE(review): declarations, braces, the mchk_index decrement, and the
 * type check preceding the "Unknown machine check" abort are elided from
 * this excerpt; code below is kept byte-identical.
 */
526 static void do_mchk_interrupt(CPUS390XState
*env
)
528 S390CPU
*cpu
= s390_env_get_cpu(env
);
/* delivering with machine checks masked is a QEMU bug */
534 if (!(env
->psw
.mask
& PSW_MASK_MCHECK
)) {
535 cpu_abort(CPU(cpu
), "Machine check w/o mchk mask\n");
538 if (env
->mchk_index
< 0 || env
->mchk_index
>= MAX_MCHK_QUEUE
) {
539 cpu_abort(CPU(cpu
), "Mchk queue overrun: %d\n", env
->mchk_index
);
542 q
= &env
->mchk_queue
[env
->mchk_index
];
545 /* Don't know how to handle this... */
546 cpu_abort(CPU(cpu
), "Unknown machine check type %d\n", q
->type
);
/* CR14 bit 28 gates channel-report-word machine checks */
548 if (!(env
->cregs
[14] & (1 << 28))) {
549 /* CRW machine checks disabled */
553 lowcore
= cpu_map_lowcore(env
);
/* dump the 16 FP/GP/access/control registers into the save areas */
555 for (i
= 0; i
< 16; i
++) {
556 lowcore
->floating_pt_save_area
[i
] = cpu_to_be64(get_freg(env
, i
)->ll
);
557 lowcore
->gpregs_save_area
[i
] = cpu_to_be64(env
->regs
[i
]);
558 lowcore
->access_regs_save_area
[i
] = cpu_to_be32(env
->aregs
[i
]);
559 lowcore
->cregs_save_area
[i
] = cpu_to_be64(env
->cregs
[i
]);
561 lowcore
->prefixreg_save_area
= cpu_to_be32(env
->psa
);
562 lowcore
->fpt_creg_save_area
= cpu_to_be32(env
->fpc
);
563 lowcore
->tod_progreg_save_area
= cpu_to_be32(env
->todpr
);
/* 64-bit timer values are stored as two 32-bit big-endian halves */
564 lowcore
->cpu_timer_save_area
[0] = cpu_to_be32(env
->cputm
>> 32);
565 lowcore
->cpu_timer_save_area
[1] = cpu_to_be32((uint32_t)env
->cputm
);
566 lowcore
->clock_comp_save_area
[0] = cpu_to_be32(env
->ckc
>> 32);
567 lowcore
->clock_comp_save_area
[1] = cpu_to_be32((uint32_t)env
->ckc
);
/* fixed interruption code; meaning of the magic values not shown here */
569 lowcore
->mcck_interruption_code
[0] = cpu_to_be32(0x00400f1d);
570 lowcore
->mcck_interruption_code
[1] = cpu_to_be32(0x40330000);
571 lowcore
->mcck_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
572 lowcore
->mcck_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
573 mask
= be64_to_cpu(lowcore
->mcck_new_psw
.mask
);
574 addr
= be64_to_cpu(lowcore
->mcck_new_psw
.addr
);
576 cpu_unmap_lowcore(lowcore
);
/* queue drained (index decremented on an elided line): clear pending bit */
579 if (env
->mchk_index
== -1) {
580 env
->pending_int
&= ~INTERRUPT_MCHK
;
583 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
584 env
->psw
.mask
, env
->psw
.addr
);
586 load_psw(env
, mask
, addr
);
/*
 * s390_cpu_do_interrupt (system mode): top-level interrupt dispatcher.
 * Picks the highest-priority deliverable pending interrupt (machine
 * check, then external, then I/O — each gated by its PSW mask bit),
 * converts timer pending bits into injected external interrupts, then
 * dispatches to the matching do_*_interrupt() helper. Clears the hard
 * interrupt request once nothing is pending.
 * NOTE(review): braces, case labels, and some closing lines are elided
 * from this excerpt; code below is kept byte-identical.
 */
589 void s390_cpu_do_interrupt(CPUState
*cs
)
591 S390CPU
*cpu
= S390_CPU(cs
);
592 CPUS390XState
*env
= &cpu
->env
;
594 qemu_log_mask(CPU_LOG_INT
, "%s: %d at pc=%" PRIx64
"\n",
595 __func__
, cs
->exception_index
, env
->psw
.addr
);
597 s390_cpu_set_state(CPU_STATE_OPERATING
, cpu
);
598 /* handle machine checks */
599 if ((env
->psw
.mask
& PSW_MASK_MCHECK
) &&
600 (cs
->exception_index
== -1)) {
601 if (env
->pending_int
& INTERRUPT_MCHK
) {
602 cs
->exception_index
= EXCP_MCHK
;
605 /* handle external interrupts */
606 if ((env
->psw
.mask
& PSW_MASK_EXT
) &&
607 cs
->exception_index
== -1) {
608 if (env
->pending_int
& INTERRUPT_EXT
) {
609 /* code is already in env */
610 cs
->exception_index
= EXCP_EXT
;
/* TOD comparator fired: inject ext interrupt code 0x1004 */
611 } else if (env
->pending_int
& INTERRUPT_TOD
) {
612 cpu_inject_ext(cpu
, 0x1004, 0, 0);
613 cs
->exception_index
= EXCP_EXT
;
614 env
->pending_int
&= ~INTERRUPT_EXT
;
615 env
->pending_int
&= ~INTERRUPT_TOD
;
/* CPU timer fired: inject ext interrupt code 0x1005 */
616 } else if (env
->pending_int
& INTERRUPT_CPUTIMER
) {
617 cpu_inject_ext(cpu
, 0x1005, 0, 0);
618 cs
->exception_index
= EXCP_EXT
;
619 env
->pending_int
&= ~INTERRUPT_EXT
;
/* NOTE(review): this branch clears INTERRUPT_TOD, not
 * INTERRUPT_CPUTIMER — looks copy-pasted from the TOD branch above;
 * confirm whether INTERRUPT_CPUTIMER should be cleared here instead. */
620 env
->pending_int
&= ~INTERRUPT_TOD
;
623 /* handle I/O interrupts */
624 if ((env
->psw
.mask
& PSW_MASK_IO
) &&
625 (cs
->exception_index
== -1)) {
626 if (env
->pending_int
& INTERRUPT_IO
) {
627 cs
->exception_index
= EXCP_IO
;
/* dispatch to the per-class delivery helper */
631 switch (cs
->exception_index
) {
633 do_program_interrupt(env
);
636 do_svc_interrupt(env
);
639 do_ext_interrupt(env
);
642 do_io_interrupt(env
);
645 do_mchk_interrupt(env
);
648 cs
->exception_index
= -1;
/* everything serviced: withdraw the hard-interrupt request */
650 if (!env
->pending_int
) {
651 cs
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
/*
 * s390_cpu_exec_interrupt: cpu-exec hook asking whether a hard interrupt
 * can be taken now; when externals are enabled in the PSW it delivers via
 * s390_cpu_do_interrupt().
 * NOTE(review): the return statements and the EXECUTE-instruction guard
 * referenced by the comment are elided from this excerpt; code below is
 * kept byte-identical.
 */
655 bool s390_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
657 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
658 S390CPU
*cpu
= S390_CPU(cs
);
659 CPUS390XState
*env
= &cpu
->env
;
662 /* Execution of the target insn is indivisible from
663 the parent EXECUTE insn. */
666 if (env
->psw
.mask
& PSW_MASK_EXT
) {
667 s390_cpu_do_interrupt(cs
);
/*
 * s390_cpu_recompute_watchpoints: rebuild the CPU watchpoints that
 * emulate PER storage-alteration events. Removes all BP_CPU watchpoints,
 * returns early when PER or the storage-alteration event is disabled,
 * then inserts watchpoints covering the CR10..CR11 range — handling the
 * whole-address-space case and the wrap-around (CR10 > CR11) case with
 * two watchpoints each.
 * NOTE(review): early returns, comment continuations, trailing argument
 * lines of two insert calls, and braces are elided from this excerpt;
 * code below is kept byte-identical.
 */
674 void s390_cpu_recompute_watchpoints(CPUState
*cs
)
676 const int wp_flags
= BP_CPU
| BP_MEM_WRITE
| BP_STOP_BEFORE_ACCESS
;
677 S390CPU
*cpu
= S390_CPU(cs
);
678 CPUS390XState
*env
= &cpu
->env
;
680 /* We are called when the watchpoints have changed. First
/* start from a clean slate of CPU-internal watchpoints */
682 cpu_watchpoint_remove_all(cs
, BP_CPU
);
684 /* Return if PER is not enabled */
685 if (!(env
->psw
.mask
& PSW_MASK_PER
)) {
689 /* Return if storage-alteration event is not enabled. */
690 if (!(env
->cregs
[9] & PER_CR9_EVENT_STORE
)) {
/* CR10 = 0, CR11 = all-ones: the range is the whole address space */
694 if (env
->cregs
[10] == 0 && env
->cregs
[11] == -1LL) {
695 /* We can't create a watchoint spanning the whole memory range, so
696 split it in two parts. */
697 cpu_watchpoint_insert(cs
, 0, 1ULL << 63, wp_flags
, NULL
);
698 cpu_watchpoint_insert(cs
, 1ULL << 63, 1ULL << 63, wp_flags
, NULL
);
699 } else if (env
->cregs
[10] > env
->cregs
[11]) {
700 /* The address range loops, create two watchpoints. */
701 cpu_watchpoint_insert(cs
, env
->cregs
[10], -env
->cregs
[10],
703 cpu_watchpoint_insert(cs
, 0, env
->cregs
[11] + 1, wp_flags
, NULL
);
706 /* Default case, create a single watchpoint. */
707 cpu_watchpoint_insert(cs
, env
->cregs
[10],
708 env
->cregs
[11] - env
->cregs
[10] + 1,
713 void s390x_cpu_debug_excp_handler(CPUState
*cs
)
715 S390CPU
*cpu
= S390_CPU(cs
);
716 CPUS390XState
*env
= &cpu
->env
;
717 CPUWatchpoint
*wp_hit
= cs
->watchpoint_hit
;
719 if (wp_hit
&& wp_hit
->flags
& BP_CPU
) {
720 /* FIXME: When the storage-alteration-space control bit is set,
721 the exception should only be triggered if the memory access
722 is done using an address space with the storage-alteration-event
723 bit set. We have no way to detect that with the current
725 cs
->watchpoint_hit
= NULL
;
727 env
->per_address
= env
->psw
.addr
;
728 env
->per_perc_atmid
|= PER_CODE_EVENT_STORE
| get_per_atmid(env
);
729 /* FIXME: We currently no way to detect the address space used
730 to trigger the watchpoint. For now just consider it is the
731 current default ASC. This turn to be true except when MVCP
732 and MVCS instrutions are not used. */
733 env
->per_perc_atmid
|= env
->psw
.mask
& (PSW_MASK_ASC
) >> 46;
735 /* Remove all watchpoints to re-execute the code. A PER exception
736 will be triggered, it will call load_psw which will recompute
738 cpu_watchpoint_remove_all(cs
, BP_CPU
);
739 cpu_loop_exit_noexc(cs
);
743 /* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
744 this is only for the atomic operations, for which we want to raise a
745 specification exception. */
/*
 * Restores the CPU state to the faulting instruction (so the PSW is
 * accurate) and raises PGM_SPECIFICATION with auto-detected ilen.
 * NOTE(review): braces and some declarations are elided from this
 * excerpt; code below is kept byte-identical.
 */
746 void s390x_cpu_do_unaligned_access(CPUState
*cs
, vaddr addr
,
747 MMUAccessType access_type
,
748 int mmu_idx
, uintptr_t retaddr
)
750 S390CPU
*cpu
= S390_CPU(cs
);
751 CPUS390XState
*env
= &cpu
->env
;
/* resynchronize env with the guest state at the faulting insn */
754 cpu_restore_state(cs
, retaddr
);
756 program_interrupt(env
, PGM_SPECIFICATION
, ILEN_AUTO
);
758 #endif /* CONFIG_USER_ONLY */
/*
 * s390_cpu_dump_state: print the CPU state (PSW, GPRs, FPRs, vector regs,
 * control regs, and optionally inline-branch statistics) to @f via
 * @cpu_fprintf. A cc_op above 3 means the condition code is still lazy,
 * so its symbolic name is printed instead of a value.
 * NOTE(review): the trailing parameters of the signature, loop braces,
 * line-wrap logic, and the end of the function are elided from this
 * excerpt; code below is kept byte-identical.
 */
760 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
763 S390CPU
*cpu
= S390_CPU(cs
);
764 CPUS390XState
*env
= &cpu
->env
;
/* cc_op > 3: condition code not yet materialized — show its name */
767 if (env
->cc_op
> 3) {
768 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
769 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
771 cpu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
772 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
/* 16 general-purpose registers */
775 for (i
= 0; i
< 16; i
++) {
776 cpu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
778 cpu_fprintf(f
, "\n");
/* 16 floating-point registers */
784 for (i
= 0; i
< 16; i
++) {
785 cpu_fprintf(f
, "F%02d=%016" PRIx64
, i
, get_freg(env
, i
)->ll
);
787 cpu_fprintf(f
, "\n");
/* 32 vector registers, two 64-bit lanes each */
793 for (i
= 0; i
< 32; i
++) {
794 cpu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
, i
,
795 env
->vregs
[i
][0].ll
, env
->vregs
[i
][1].ll
);
796 cpu_fprintf(f
, (i
% 2) ? "\n" : " ");
799 #ifndef CONFIG_USER_ONLY
/* 16 control registers (system emulation only) */
800 for (i
= 0; i
< 16; i
++) {
801 cpu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
803 cpu_fprintf(f
, "\n");
810 #ifdef DEBUG_INLINE_BRANCHES
811 for (i
= 0; i
< CC_OP_MAX
; i
++) {
812 cpu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
813 inline_branch_miss
[i
], inline_branch_hit
[i
]);
817 cpu_fprintf(f
, "\n");