/*
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
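/* Timer callback: flag a TOD-clock-comparator external interrupt and
   kick the CPU. */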
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
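
/* Timer callback: flag a CPU-timer external interrupt and kick the CPU. */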
void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif
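
/* Create a new S390 CPU object and realize it. */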
S390CPU *cpu_s390x_init(const char *cpu_model)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
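
/* Translate a guest virtual address and, on success, install the mapping
   in the TLB; returns 0 on success, 1 if an exception was raised. */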
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
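
/* Install a new PSW: update the condition code, recompute PER watchpoints
   when the PER bit changes, and halt the CPU when a wait PSW is loaded. */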
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
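
/* Return the PSW mask with the current condition code (recomputed from the
   lazy CC state) merged into it. */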
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
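
/* Map the lowcore (prefix area) into host memory; aborts if it cannot be
   mapped in full. */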
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
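
/* Deliver a restart interrupt: save the old PSW into the lowcore and load
   the restart new PSW. */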
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
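
/* Deliver a program interrupt: determine the instruction length, store the
   interruption parameters in the lowcore and swap PSWs. */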
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
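
/* Deliver a supervisor-call interrupt, followed by a pending PER event
   if there is one. */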
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
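
/* Deliver the external interrupt at the top of the queue and pop it. */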
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
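
/* Deliver the first enabled I/O interrupt, scanning the per-ISC queues
   against the subclass mask in CR6. */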
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
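
/* Deliver a machine-check interrupt: save the register state into the
   lowcore save areas and swap PSWs.  Only CRW-type machine checks are
   handled. */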
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
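
/* Main dispatcher: pick the pending interrupt or exception with the highest
   priority that the current PSW mask enables, and deliver it. */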
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint API.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the current
           default ASC.  This is true except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}

#endif /* CONFIG_USER_ONLY */