 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "exec/gdbstub.h"
23 #include "qemu/timer.h"
24 #include "exec/cpu_ldst.h"
25 #ifndef CONFIG_USER_ONLY
26 #include "sysemu/sysemu.h"
//#define DEBUG_S390_STDOUT

/*
 * Debug tracing: with DEBUG_S390 enabled, DPRINTF() writes to the qemu log,
 * and additionally to stderr when DEBUG_S390_STDOUT is set.  With debugging
 * disabled it expands to an empty statement.
 */
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
47 #ifndef CONFIG_USER_ONLY
48 void s390x_tod_timer(void *opaque
)
50 S390CPU
*cpu
= opaque
;
51 CPUS390XState
*env
= &cpu
->env
;
53 env
->pending_int
|= INTERRUPT_TOD
;
54 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HARD
);
57 void s390x_cpu_timer(void *opaque
)
59 S390CPU
*cpu
= opaque
;
60 CPUS390XState
*env
= &cpu
->env
;
62 env
->pending_int
|= INTERRUPT_CPUTIMER
;
63 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HARD
);
67 S390CPU
*cpu_s390x_init(const char *cpu_model
)
71 cpu
= S390_CPU(object_new(TYPE_S390_CPU
));
73 object_property_set_bool(OBJECT(cpu
), true, "realized", NULL
);
78 #if defined(CONFIG_USER_ONLY)
80 void s390_cpu_do_interrupt(CPUState
*cs
)
82 cs
->exception_index
= -1;
85 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr address
,
88 S390CPU
*cpu
= S390_CPU(cs
);
90 cs
->exception_index
= EXCP_PGM
;
91 cpu
->env
.int_pgm_code
= PGM_ADDRESSING
;
92 /* On real machines this value is dropped into LowMem. Since this
93 is userland, simply put this someplace that cpu_loop can find it. */
94 cpu
->env
.__excp_addr
= address
;
98 #else /* !CONFIG_USER_ONLY */
100 /* Ensure to exit the TB after this call! */
101 void trigger_pgm_exception(CPUS390XState
*env
, uint32_t code
, uint32_t ilen
)
103 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
105 cs
->exception_index
= EXCP_PGM
;
106 env
->int_pgm_code
= code
;
107 env
->int_pgm_ilen
= ilen
;
110 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr orig_vaddr
,
113 S390CPU
*cpu
= S390_CPU(cs
);
114 CPUS390XState
*env
= &cpu
->env
;
115 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
116 target_ulong vaddr
, raddr
;
119 DPRINTF("%s: address 0x%" VADDR_PRIx
" rw %d mmu_idx %d\n",
120 __func__
, orig_vaddr
, rw
, mmu_idx
);
122 orig_vaddr
&= TARGET_PAGE_MASK
;
126 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
130 if (mmu_translate(env
, vaddr
, rw
, asc
, &raddr
, &prot
, true)) {
131 /* Translation ended in exception */
135 /* check out of RAM access */
136 if (raddr
> (ram_size
+ virtio_size
)) {
137 DPRINTF("%s: raddr %" PRIx64
" > ram_size %" PRIx64
"\n", __func__
,
138 (uint64_t)raddr
, (uint64_t)ram_size
);
139 trigger_pgm_exception(env
, PGM_ADDRESSING
, ILEN_LATER
);
143 qemu_log_mask(CPU_LOG_MMU
, "%s: set tlb %" PRIx64
" -> %" PRIx64
" (%x)\n",
144 __func__
, (uint64_t)vaddr
, (uint64_t)raddr
, prot
);
146 tlb_set_page(cs
, orig_vaddr
, raddr
, prot
,
147 mmu_idx
, TARGET_PAGE_SIZE
);
152 hwaddr
s390_cpu_get_phys_page_debug(CPUState
*cs
, vaddr vaddr
)
154 S390CPU
*cpu
= S390_CPU(cs
);
155 CPUS390XState
*env
= &cpu
->env
;
158 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
161 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
165 mmu_translate(env
, vaddr
, MMU_INST_FETCH
, asc
, &raddr
, &prot
, false);
170 hwaddr
s390_cpu_get_phys_addr_debug(CPUState
*cs
, vaddr vaddr
)
175 page
= vaddr
& TARGET_PAGE_MASK
;
176 phys_addr
= cpu_get_phys_page_debug(cs
, page
);
177 phys_addr
+= (vaddr
& ~TARGET_PAGE_MASK
);
182 void load_psw(CPUS390XState
*env
, uint64_t mask
, uint64_t addr
)
184 env
->psw
.addr
= addr
;
185 env
->psw
.mask
= mask
;
187 env
->cc_op
= (mask
>> 44) & 3;
190 if (mask
& PSW_MASK_WAIT
) {
191 S390CPU
*cpu
= s390_env_get_cpu(env
);
192 if (s390_cpu_halt(cpu
) == 0) {
193 #ifndef CONFIG_USER_ONLY
194 qemu_system_shutdown_request();
200 static uint64_t get_psw_mask(CPUS390XState
*env
)
202 uint64_t r
= env
->psw
.mask
;
205 env
->cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
,
209 assert(!(env
->cc_op
& ~3));
210 r
|= (uint64_t)env
->cc_op
<< 44;
216 static LowCore
*cpu_map_lowcore(CPUS390XState
*env
)
218 S390CPU
*cpu
= s390_env_get_cpu(env
);
220 hwaddr len
= sizeof(LowCore
);
222 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, 1);
224 if (len
< sizeof(LowCore
)) {
225 cpu_abort(CPU(cpu
), "Could not map lowcore\n");
231 static void cpu_unmap_lowcore(LowCore
*lowcore
)
233 cpu_physical_memory_unmap(lowcore
, sizeof(LowCore
), 1, sizeof(LowCore
));
236 void do_restart_interrupt(CPUS390XState
*env
)
241 lowcore
= cpu_map_lowcore(env
);
243 lowcore
->restart_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
244 lowcore
->restart_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
245 mask
= be64_to_cpu(lowcore
->restart_new_psw
.mask
);
246 addr
= be64_to_cpu(lowcore
->restart_new_psw
.addr
);
248 cpu_unmap_lowcore(lowcore
);
250 load_psw(env
, mask
, addr
);
253 static void do_svc_interrupt(CPUS390XState
*env
)
258 lowcore
= cpu_map_lowcore(env
);
260 lowcore
->svc_code
= cpu_to_be16(env
->int_svc_code
);
261 lowcore
->svc_ilen
= cpu_to_be16(env
->int_svc_ilen
);
262 lowcore
->svc_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
263 lowcore
->svc_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
+ env
->int_svc_ilen
);
264 mask
= be64_to_cpu(lowcore
->svc_new_psw
.mask
);
265 addr
= be64_to_cpu(lowcore
->svc_new_psw
.addr
);
267 cpu_unmap_lowcore(lowcore
);
269 load_psw(env
, mask
, addr
);
272 static void do_program_interrupt(CPUS390XState
*env
)
276 int ilen
= env
->int_pgm_ilen
;
280 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
283 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
284 env
->psw
.addr
+= ilen
;
287 assert(ilen
== 2 || ilen
== 4 || ilen
== 6);
290 qemu_log_mask(CPU_LOG_INT
, "%s: code=0x%x ilen=%d\n",
291 __func__
, env
->int_pgm_code
, ilen
);
293 lowcore
= cpu_map_lowcore(env
);
295 lowcore
->pgm_ilen
= cpu_to_be16(ilen
);
296 lowcore
->pgm_code
= cpu_to_be16(env
->int_pgm_code
);
297 lowcore
->program_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
298 lowcore
->program_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
299 mask
= be64_to_cpu(lowcore
->program_new_psw
.mask
);
300 addr
= be64_to_cpu(lowcore
->program_new_psw
.addr
);
302 cpu_unmap_lowcore(lowcore
);
304 DPRINTF("%s: %x %x %" PRIx64
" %" PRIx64
"\n", __func__
,
305 env
->int_pgm_code
, ilen
, env
->psw
.mask
,
308 load_psw(env
, mask
, addr
);
/* OR'ed into the CPU number stored in the lowcore cpu_addr field when an
 * external interrupt is delivered (see do_ext_interrupt).  Presumably the
 * s390-virtio subcode marker — confirm against the machine documentation. */
#define VIRTIO_SUBCODE_64 0x0D00
313 static void do_ext_interrupt(CPUS390XState
*env
)
315 S390CPU
*cpu
= s390_env_get_cpu(env
);
320 if (!(env
->psw
.mask
& PSW_MASK_EXT
)) {
321 cpu_abort(CPU(cpu
), "Ext int w/o ext mask\n");
324 if (env
->ext_index
< 0 || env
->ext_index
>= MAX_EXT_QUEUE
) {
325 cpu_abort(CPU(cpu
), "Ext queue overrun: %d\n", env
->ext_index
);
328 q
= &env
->ext_queue
[env
->ext_index
];
329 lowcore
= cpu_map_lowcore(env
);
331 lowcore
->ext_int_code
= cpu_to_be16(q
->code
);
332 lowcore
->ext_params
= cpu_to_be32(q
->param
);
333 lowcore
->ext_params2
= cpu_to_be64(q
->param64
);
334 lowcore
->external_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
335 lowcore
->external_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
336 lowcore
->cpu_addr
= cpu_to_be16(env
->cpu_num
| VIRTIO_SUBCODE_64
);
337 mask
= be64_to_cpu(lowcore
->external_new_psw
.mask
);
338 addr
= be64_to_cpu(lowcore
->external_new_psw
.addr
);
340 cpu_unmap_lowcore(lowcore
);
343 if (env
->ext_index
== -1) {
344 env
->pending_int
&= ~INTERRUPT_EXT
;
347 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
348 env
->psw
.mask
, env
->psw
.addr
);
350 load_psw(env
, mask
, addr
);
353 static void do_io_interrupt(CPUS390XState
*env
)
355 S390CPU
*cpu
= s390_env_get_cpu(env
);
362 if (!(env
->psw
.mask
& PSW_MASK_IO
)) {
363 cpu_abort(CPU(cpu
), "I/O int w/o I/O mask\n");
366 for (isc
= 0; isc
< ARRAY_SIZE(env
->io_index
); isc
++) {
369 if (env
->io_index
[isc
] < 0) {
372 if (env
->io_index
[isc
] >= MAX_IO_QUEUE
) {
373 cpu_abort(CPU(cpu
), "I/O queue overrun for isc %d: %d\n",
374 isc
, env
->io_index
[isc
]);
377 q
= &env
->io_queue
[env
->io_index
[isc
]][isc
];
378 isc_bits
= ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q
->word
));
379 if (!(env
->cregs
[6] & isc_bits
)) {
387 lowcore
= cpu_map_lowcore(env
);
389 lowcore
->subchannel_id
= cpu_to_be16(q
->id
);
390 lowcore
->subchannel_nr
= cpu_to_be16(q
->nr
);
391 lowcore
->io_int_parm
= cpu_to_be32(q
->parm
);
392 lowcore
->io_int_word
= cpu_to_be32(q
->word
);
393 lowcore
->io_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
394 lowcore
->io_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
395 mask
= be64_to_cpu(lowcore
->io_new_psw
.mask
);
396 addr
= be64_to_cpu(lowcore
->io_new_psw
.addr
);
398 cpu_unmap_lowcore(lowcore
);
400 env
->io_index
[isc
]--;
402 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
403 env
->psw
.mask
, env
->psw
.addr
);
404 load_psw(env
, mask
, addr
);
406 if (env
->io_index
[isc
] >= 0) {
413 env
->pending_int
&= ~INTERRUPT_IO
;
418 static void do_mchk_interrupt(CPUS390XState
*env
)
420 S390CPU
*cpu
= s390_env_get_cpu(env
);
426 if (!(env
->psw
.mask
& PSW_MASK_MCHECK
)) {
427 cpu_abort(CPU(cpu
), "Machine check w/o mchk mask\n");
430 if (env
->mchk_index
< 0 || env
->mchk_index
>= MAX_MCHK_QUEUE
) {
431 cpu_abort(CPU(cpu
), "Mchk queue overrun: %d\n", env
->mchk_index
);
434 q
= &env
->mchk_queue
[env
->mchk_index
];
437 /* Don't know how to handle this... */
438 cpu_abort(CPU(cpu
), "Unknown machine check type %d\n", q
->type
);
440 if (!(env
->cregs
[14] & (1 << 28))) {
441 /* CRW machine checks disabled */
445 lowcore
= cpu_map_lowcore(env
);
447 for (i
= 0; i
< 16; i
++) {
448 lowcore
->floating_pt_save_area
[i
] = cpu_to_be64(get_freg(env
, i
)->ll
);
449 lowcore
->gpregs_save_area
[i
] = cpu_to_be64(env
->regs
[i
]);
450 lowcore
->access_regs_save_area
[i
] = cpu_to_be32(env
->aregs
[i
]);
451 lowcore
->cregs_save_area
[i
] = cpu_to_be64(env
->cregs
[i
]);
453 lowcore
->prefixreg_save_area
= cpu_to_be32(env
->psa
);
454 lowcore
->fpt_creg_save_area
= cpu_to_be32(env
->fpc
);
455 lowcore
->tod_progreg_save_area
= cpu_to_be32(env
->todpr
);
456 lowcore
->cpu_timer_save_area
[0] = cpu_to_be32(env
->cputm
>> 32);
457 lowcore
->cpu_timer_save_area
[1] = cpu_to_be32((uint32_t)env
->cputm
);
458 lowcore
->clock_comp_save_area
[0] = cpu_to_be32(env
->ckc
>> 32);
459 lowcore
->clock_comp_save_area
[1] = cpu_to_be32((uint32_t)env
->ckc
);
461 lowcore
->mcck_interruption_code
[0] = cpu_to_be32(0x00400f1d);
462 lowcore
->mcck_interruption_code
[1] = cpu_to_be32(0x40330000);
463 lowcore
->mcck_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
464 lowcore
->mcck_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
465 mask
= be64_to_cpu(lowcore
->mcck_new_psw
.mask
);
466 addr
= be64_to_cpu(lowcore
->mcck_new_psw
.addr
);
468 cpu_unmap_lowcore(lowcore
);
471 if (env
->mchk_index
== -1) {
472 env
->pending_int
&= ~INTERRUPT_MCHK
;
475 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
476 env
->psw
.mask
, env
->psw
.addr
);
478 load_psw(env
, mask
, addr
);
481 void s390_cpu_do_interrupt(CPUState
*cs
)
483 S390CPU
*cpu
= S390_CPU(cs
);
484 CPUS390XState
*env
= &cpu
->env
;
486 qemu_log_mask(CPU_LOG_INT
, "%s: %d at pc=%" PRIx64
"\n",
487 __func__
, cs
->exception_index
, env
->psw
.addr
);
489 s390_cpu_set_state(CPU_STATE_OPERATING
, cpu
);
490 /* handle machine checks */
491 if ((env
->psw
.mask
& PSW_MASK_MCHECK
) &&
492 (cs
->exception_index
== -1)) {
493 if (env
->pending_int
& INTERRUPT_MCHK
) {
494 cs
->exception_index
= EXCP_MCHK
;
497 /* handle external interrupts */
498 if ((env
->psw
.mask
& PSW_MASK_EXT
) &&
499 cs
->exception_index
== -1) {
500 if (env
->pending_int
& INTERRUPT_EXT
) {
501 /* code is already in env */
502 cs
->exception_index
= EXCP_EXT
;
503 } else if (env
->pending_int
& INTERRUPT_TOD
) {
504 cpu_inject_ext(cpu
, 0x1004, 0, 0);
505 cs
->exception_index
= EXCP_EXT
;
506 env
->pending_int
&= ~INTERRUPT_EXT
;
507 env
->pending_int
&= ~INTERRUPT_TOD
;
508 } else if (env
->pending_int
& INTERRUPT_CPUTIMER
) {
509 cpu_inject_ext(cpu
, 0x1005, 0, 0);
510 cs
->exception_index
= EXCP_EXT
;
511 env
->pending_int
&= ~INTERRUPT_EXT
;
512 env
->pending_int
&= ~INTERRUPT_TOD
;
515 /* handle I/O interrupts */
516 if ((env
->psw
.mask
& PSW_MASK_IO
) &&
517 (cs
->exception_index
== -1)) {
518 if (env
->pending_int
& INTERRUPT_IO
) {
519 cs
->exception_index
= EXCP_IO
;
523 switch (cs
->exception_index
) {
525 do_program_interrupt(env
);
528 do_svc_interrupt(env
);
531 do_ext_interrupt(env
);
534 do_io_interrupt(env
);
537 do_mchk_interrupt(env
);
540 cs
->exception_index
= -1;
542 if (!env
->pending_int
) {
543 cs
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
547 bool s390_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
549 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
550 S390CPU
*cpu
= S390_CPU(cs
);
551 CPUS390XState
*env
= &cpu
->env
;
553 if (env
->psw
.mask
& PSW_MASK_EXT
) {
554 s390_cpu_do_interrupt(cs
);
560 #endif /* CONFIG_USER_ONLY */