4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "exec/gdbstub.h"
23 #include "qemu/timer.h"
24 #include "exec/cpu_ldst.h"
25 #ifndef CONFIG_USER_ONLY
26 #include "sysemu/sysemu.h"
30 //#define DEBUG_S390_PTE
31 //#define DEBUG_S390_STDOUT
34 #ifdef DEBUG_S390_STDOUT
35 #define DPRINTF(fmt, ...) \
36 do { fprintf(stderr, fmt, ## __VA_ARGS__); \
37 qemu_log(fmt, ##__VA_ARGS__); } while (0)
39 #define DPRINTF(fmt, ...) \
40 do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
43 #define DPRINTF(fmt, ...) \
48 #define PTE_DPRINTF DPRINTF
50 #define PTE_DPRINTF(fmt, ...) \
54 #ifndef CONFIG_USER_ONLY
55 void s390x_tod_timer(void *opaque
)
57 S390CPU
*cpu
= opaque
;
58 CPUS390XState
*env
= &cpu
->env
;
60 env
->pending_int
|= INTERRUPT_TOD
;
61 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HARD
);
64 void s390x_cpu_timer(void *opaque
)
66 S390CPU
*cpu
= opaque
;
67 CPUS390XState
*env
= &cpu
->env
;
69 env
->pending_int
|= INTERRUPT_CPUTIMER
;
70 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HARD
);
/*
 * Create and realize a new S390 CPU QOM object.
 *
 * NOTE(review): cpu_model is accepted but unused in the visible code, and
 * the declaration of "cpu" plus the final return statement fall on lines
 * elided from this extract -- confirm against the full file.
 */
74 S390CPU
*cpu_s390x_init(const char *cpu_model
)
78 cpu
= S390_CPU(object_new(TYPE_S390_CPU
))
;
/* Setting "realized" runs the standard QOM realize machinery. */
80 object_property_set_bool(OBJECT(cpu
), true, "realized", NULL
);
85 #if defined(CONFIG_USER_ONLY)
87 void s390_cpu_do_interrupt(CPUState
*cs
)
89 cs
->exception_index
= -1;
/*
 * User-only MMU fault handler: every fault is turned into an addressing
 * program exception.  NOTE(review): the trailing parameters of the
 * signature and the return statement fall on elided lines.
 */
92 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr address
,
95 S390CPU
*cpu
= S390_CPU(cs
);
/* Mark a program exception with code PGM_ADDRESSING pending. */
97 cs
->exception_index
= EXCP_PGM
;
98 cpu
->env
.int_pgm_code
= PGM_ADDRESSING
;
99 /* On real machines this value is dropped into LowMem. Since this
100 is userland, simply put this someplace that cpu_loop can find it. */
101 cpu
->env
.__excp_addr
= address
;
105 #else /* !CONFIG_USER_ONLY */
107 /* Ensure to exit the TB after this call! */
/*
 * Record a program interruption: stash the interruption code and
 * instruction length in env and mark EXCP_PGM pending on the CPU.
 * NOTE(review): the "ilen" parameter is declared on an elided
 * continuation line of the signature.
 */
108 static void trigger_pgm_exception(CPUS390XState
*env
, uint32_t code
,
111 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
113 cs
->exception_index
= EXCP_PGM
;
114 env
->int_pgm_code
= code
;
115 env
->int_pgm_ilen
= ilen
;
/*
 * Map an address-space-control (ASC) mode to the low bits stored in the
 * translation-exception code.  NOTE(review): the per-case bodies and the
 * return statement are on elided lines; only the case labels and the
 * unknown-mode abort are visible here.
 */
118 static int trans_bits(CPUS390XState
*env
, uint64_t mode
)
120 S390CPU
*cpu
= s390_env_get_cpu(env
);
124 case PSW_ASC_PRIMARY
:
127 case PSW_ASC_SECONDARY
:
/* Unrecognized ASC mode is a guest/QEMU inconsistency: hard abort. */
134 cpu_abort(CPU(cpu
), "unknown asc mode\n");
/*
 * Raise a protection exception for "vaddr": store the faulting address
 * OR-ed with the translation bits into lowcore trans_exc_code, then
 * trigger PGM_PROTECTION.  NOTE(review): the "mode" parameter sits on an
 * elided continuation line, and the opening of the physical-store call
 * (presumably stq_phys) before line "151" is elided -- confirm against
 * the full file.
 */
141 static void trigger_prot_fault(CPUS390XState
*env
, target_ulong vaddr
,
144 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
145 int ilen
= ILEN_LATER_INC
;
/* Bit 2 (value 4) distinguishes a protection fault in the exc code. */
146 int bits
= trans_bits(env
, mode
) | 4;
148 DPRINTF("%s: vaddr=%016" PRIx64
" bits=%d\n", __func__
, vaddr
, bits
);
151 env
->psa
+ offsetof(LowCore
, trans_exc_code
), vaddr
| bits
);
152 trigger_pgm_exception(env
, PGM_PROTECTION
, ilen
);
/*
 * Raise a DAT fault of the given "type" (e.g. PGM_PAGE_TRANS) for
 * "vaddr": store vaddr OR-ed with the ASC translation bits into lowcore
 * trans_exc_code, then trigger the program exception.  NOTE(review): the
 * special-casing for code accesses (after the "undefined ilc" comment)
 * and the opening of the physical-store call before line "170" are on
 * elided lines.
 */
155 static void trigger_page_fault(CPUS390XState
*env
, target_ulong vaddr
,
156 uint32_t type
, uint64_t asc
, int rw
)
158 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
159 int ilen
= ILEN_LATER
;
160 int bits
= trans_bits(env
, asc
);
162 /* Code accesses have an undefined ilc. */
167 DPRINTF("%s: vaddr=%016" PRIx64
" bits=%d\n", __func__
, vaddr
, bits
);
170 env
->psa
+ offsetof(LowCore
, trans_exc_code
), vaddr
| bits
);
171 trigger_pgm_exception(env
, type
, ilen
);
175 * Translate real address to absolute (= physical)
176 * address by taking care of the prefix mapping.
178 static target_ulong
mmu_real2abs(CPUS390XState
*env
, target_ulong raddr
)
180 if (raddr
< 0x2000) {
181 return raddr
+ env
->psa
; /* Map the lowcore. */
182 } else if (raddr
>= env
->psa
&& raddr
< env
->psa
+ 0x2000) {
183 return raddr
- env
->psa
; /* Map the 0 page. */
188 /* Decode page table entry (normal 4KB page) */
/*
 * "asce" here holds the page-table entry itself.  Faults if the entry is
 * invalid, strips write permission for read-only pages, and otherwise
 * yields the page frame address in *raddr.  NOTE(review): the return
 * statements (error/success) are on elided lines.
 */
189 static int mmu_translate_pte(CPUS390XState
*env
, target_ulong vaddr
,
190 uint64_t asc
, uint64_t asce
,
191 target_ulong
*raddr
, int *flags
, int rw
)
193 if (asce
& _PAGE_INVALID
) {
194 DPRINTF("%s: PTE=0x%" PRIx64
" invalid\n", __func__
, asce
);
195 trigger_page_fault(env
, vaddr
, PGM_PAGE_TRANS
, asc
, rw
);
/* Read-only page: drop write permission but keep read/exec. */
199 if (asce
& _PAGE_RO
) {
200 *flags
&= ~PAGE_WRITE
;
203 *raddr
= asce
& _ASCE_ORIGIN
;
205 PTE_DPRINTF("%s: PTE=0x%" PRIx64
"\n", __func__
, asce
);
210 /* Decode EDAT1 segment frame absolute address (1MB page) */
/*
 * "asce" here holds the segment-table entry.  Faults on an invalid
 * segment, strips write permission for read-only segments, and builds
 * *raddr from the 1MB frame plus the low 20 bits of vaddr.
 * NOTE(review): the tail of the signature ("int *flags, int rw") and the
 * return statements are on elided lines.
 */
211 static int mmu_translate_sfaa(CPUS390XState
*env
, target_ulong vaddr
,
212 uint64_t asc
, uint64_t asce
, target_ulong
*raddr
,
215 if (asce
& _SEGMENT_ENTRY_INV
) {
216 DPRINTF("%s: SEG=0x%" PRIx64
" invalid\n", __func__
, asce
);
217 trigger_page_fault(env
, vaddr
, PGM_SEGMENT_TRANS
, asc
, rw
);
221 if (asce
& _SEGMENT_ENTRY_RO
) {
222 *flags
&= ~PAGE_WRITE
;
/* 1MB frame address | 20-bit byte offset within the segment. */
225 *raddr
= (asce
& 0xfffffffffff00000ULL
) | (vaddr
& 0xfffff);
227 PTE_DPRINTF("%s: SEG=0x%" PRIx64
"\n", __func__
, asce
);
/*
 * Walk one level of the DAT region/segment table hierarchy.  "asce" is
 * the current table entry, "level" the _ASCE_TYPE_* level it belongs to.
 * Selects the table-index bits of vaddr for this level, loads the next
 * entry from guest physical memory, and recurses (or decodes the final
 * PTE / EDAT1 1MB frame).  NOTE(review): local declarations (origin,
 * offs, new_asce), the break statements between cases, and the return
 * statements after the fault triggers are on elided lines.
 */
232 static int mmu_translate_asce(CPUS390XState
*env
, target_ulong vaddr
,
233 uint64_t asc
, uint64_t asce
, int level
,
234 target_ulong
*raddr
, int *flags
, int rw
)
236 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
241 PTE_DPRINTF("%s: 0x%" PRIx64
"\n", __func__
, asce
);
/* Invalid entry at this level -> segment-translation fault. */
243 if (((level
!= _ASCE_TYPE_SEGMENT
) && (asce
& _REGION_ENTRY_INV
)) ||
244 ((level
== _ASCE_TYPE_SEGMENT
) && (asce
& _SEGMENT_ENTRY_INV
))) {
245 /* XXX different regions have different faults */
246 DPRINTF("%s: invalid region\n", __func__
);
247 trigger_page_fault(env
, vaddr
, PGM_SEGMENT_TRANS
, asc
, rw
);
/* Entry type must match the level we expect to be walking. */
251 if ((level
<= _ASCE_TYPE_MASK
) && ((asce
& _ASCE_TYPE_MASK
) != level
)) {
252 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
256 if (asce
& _ASCE_REAL_SPACE
) {
263 origin
= asce
& _ASCE_ORIGIN
;
/* Pick the vaddr bits that index the table at this level. */
266 case _ASCE_TYPE_REGION1
+ 4:
267 offs
= (vaddr
>> 50) & 0x3ff8;
269 case _ASCE_TYPE_REGION1
:
270 offs
= (vaddr
>> 39) & 0x3ff8;
272 case _ASCE_TYPE_REGION2
:
273 offs
= (vaddr
>> 28) & 0x3ff8;
275 case _ASCE_TYPE_REGION3
:
276 offs
= (vaddr
>> 17) & 0x3ff8;
278 case _ASCE_TYPE_SEGMENT
:
279 offs
= (vaddr
>> 9) & 0x07f8;
280 origin
= asce
& _SEGMENT_ENTRY_ORIGIN
;
284 /* XXX region protection flags */
285 /* *flags &= ~PAGE_WRITE */
/* Fetch the next-level entry from guest physical memory. */
287 new_asce
= ldq_phys(cs
->as
, origin
+ offs
);
288 PTE_DPRINTF("%s: 0x%" PRIx64
" + 0x%" PRIx64
" => 0x%016" PRIx64
"\n",
289 __func__
, origin
, offs
, new_asce
);
291 if (level
== _ASCE_TYPE_SEGMENT
) {
293 return mmu_translate_pte(env
, vaddr
, asc
, new_asce
, raddr
, flags
, rw
);
/* EDAT1 large (1MB) page: segment entry with format control set. */
294 } else if (level
- 4 == _ASCE_TYPE_SEGMENT
&&
295 (new_asce
& _SEGMENT_ENTRY_FC
) && (env
->cregs
[0] & CR0_EDAT
)) {
297 return mmu_translate_sfaa(env
, vaddr
, asc
, new_asce
, raddr
, flags
, rw
);
299 /* yet another region */
300 return mmu_translate_asce(env
, vaddr
, asc
, new_asce
, level
- 4, raddr
,
/*
 * Start a DAT table walk for one ASC mode: pick the ASCE from the
 * matching control register (CR1 primary, CR7 secondary, CR13 home),
 * check that vaddr fits inside the address range the top table level can
 * map, then walk from one level above the ASCE's designation-type level.
 * On a write (rw == 1) without write permission, raise a protection
 * fault.  NOTE(review): the declarations of "asce"/"r", the default
 * switch arms, the returns after the fault triggers, and the final
 * return are on elided lines.
 */
305 static int mmu_translate_asc(CPUS390XState
*env
, target_ulong vaddr
,
306 uint64_t asc
, target_ulong
*raddr
, int *flags
,
310 int level
, new_level
;
314 case PSW_ASC_PRIMARY
:
315 PTE_DPRINTF("%s: asc=primary\n", __func__
);
316 asce
= env
->cregs
[1];
318 case PSW_ASC_SECONDARY
:
319 PTE_DPRINTF("%s: asc=secondary\n", __func__
);
320 asce
= env
->cregs
[7];
323 PTE_DPRINTF("%s: asc=home\n", __func__
);
324 asce
= env
->cregs
[13];
/* Reject addresses above what the designated table type can map. */
328 switch (asce
& _ASCE_TYPE_MASK
) {
329 case _ASCE_TYPE_REGION1
:
331 case _ASCE_TYPE_REGION2
:
332 if (vaddr
& 0xffe0000000000000ULL
) {
333 DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
334 " 0xffe0000000000000ULL\n", __func__
, vaddr
);
335 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
339 case _ASCE_TYPE_REGION3
:
340 if (vaddr
& 0xfffffc0000000000ULL
) {
341 DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
342 " 0xfffffc0000000000ULL\n", __func__
, vaddr
);
343 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
347 case _ASCE_TYPE_SEGMENT
:
348 if (vaddr
& 0xffffffff80000000ULL
) {
349 DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
350 " 0xffffffff80000000ULL\n", __func__
, vaddr
);
351 trigger_page_fault(env
, vaddr
, PGM_TRANS_SPEC
, asc
, rw
);
357 /* fake level above current */
358 level
= asce
& _ASCE_TYPE_MASK
;
359 new_level
= level
+ 4;
360 asce
= (asce
& ~_ASCE_TYPE_MASK
) | (new_level
& _ASCE_TYPE_MASK
);
362 r
= mmu_translate_asce(env
, vaddr
, asc
, asce
, new_level
, raddr
, flags
, rw
);
/* Write access to a page translated without PAGE_WRITE -> prot fault. */
364 if ((rw
== 1) && !(*flags
& PAGE_WRITE
)) {
365 trigger_prot_fault(env
, vaddr
, asc
);
/*
 * Top-level address translation: start with full RWX permissions, then
 * either pass the address through (DAT off) or walk the tables per the
 * ASC mode.  Access-register mode is split: instruction fetches use the
 * primary space, data accesses the secondary space, and the two walks'
 * flags are intersected accordingly.  Finally the real address is
 * prefixed into an absolute address and storage-key reference/change
 * bits are updated.  NOTE(review): the DAT-off return path, the
 * access-register case label, the "r" declaration/returns, and the
 * storage-key R/C updates inside the two flag checks are on elided
 * lines.
 */
372 int mmu_translate(CPUS390XState
*env
, target_ulong vaddr
, int rw
, uint64_t asc
,
373 target_ulong
*raddr
, int *flags
)
378 *flags
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
379 vaddr
&= TARGET_PAGE_MASK
;
/* DAT disabled: real addressing, no table walk. */
381 if (!(env
->psw
.mask
& PSW_MASK_DAT
)) {
388 case PSW_ASC_PRIMARY
:
390 r
= mmu_translate_asc(env
, vaddr
, asc
, raddr
, flags
, rw
);
392 case PSW_ASC_SECONDARY
:
394 * Instruction: Primary
398 r
= mmu_translate_asc(env
, vaddr
, PSW_ASC_PRIMARY
, raddr
, flags
,
/* Primary walk only contributes EXEC permission here... */
400 *flags
&= ~(PAGE_READ
| PAGE_WRITE
);
402 r
= mmu_translate_asc(env
, vaddr
, PSW_ASC_SECONDARY
, raddr
, flags
,
/* ...and the secondary walk only read/write. */
404 *flags
&= ~(PAGE_EXEC
);
409 hw_error("guest switched to unknown asc mode\n");
414 /* Convert real address -> absolute address */
415 *raddr
= mmu_real2abs(env
, *raddr
);
/* Update the storage key's reference/change bits for RAM pages. */
417 if (*raddr
<= ram_size
) {
418 sk
= &env
->storage_keys
[*raddr
/ TARGET_PAGE_SIZE
];
419 if (*flags
& PAGE_READ
) {
423 if (*flags
& PAGE_WRITE
) {
/*
 * System-mode TLB fill: translate orig_vaddr via mmu_translate() and, on
 * success, install the mapping with tlb_set_page().  Addresses beyond
 * guest RAM (+ the virtio region) raise an addressing exception.
 * NOTE(review): the trailing signature parameters ("int rw, int
 * mmu_idx"), the "prot" declaration, the 31-bit masking inside the
 * !PSW_MASK_64 branch, the vaddr assignment, and the return statements
 * are on elided lines.
 */
431 int s390_cpu_handle_mmu_fault(CPUState
*cs
, vaddr orig_vaddr
,
434 S390CPU
*cpu
= S390_CPU(cs
);
435 CPUS390XState
*env
= &cpu
->env
;
436 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
437 target_ulong vaddr
, raddr
;
440 DPRINTF("%s: address 0x%" VADDR_PRIx
" rw %d mmu_idx %d\n",
441 __func__
, orig_vaddr
, rw
, mmu_idx
);
443 orig_vaddr
&= TARGET_PAGE_MASK
;
/* 31-bit mode: only part of the address space is addressable. */
447 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
451 if (mmu_translate(env
, vaddr
, rw
, asc
, &raddr
, &prot
)) {
452 /* Translation ended in exception */
456 /* check out of RAM access */
457 if (raddr
> (ram_size
+ virtio_size
)) {
458 DPRINTF("%s: raddr %" PRIx64
" > ram_size %" PRIx64
"\n", __func__
,
459 (uint64_t)raddr
, (uint64_t)ram_size
);
460 trigger_pgm_exception(env
, PGM_ADDRESSING
, ILEN_LATER
);
464 DPRINTF("%s: set tlb %" PRIx64
" -> %" PRIx64
" (%x)\n", __func__
,
465 (uint64_t)vaddr
, (uint64_t)raddr
, prot
);
467 tlb_set_page(cs
, orig_vaddr
, raddr
, prot
,
468 mmu_idx
, TARGET_PAGE_SIZE
);
/*
 * Debugger (gdbstub) page lookup: run the translation with rw == 2 and
 * the current ASC mode, restoring the saved exception index afterwards
 * so a failed debug walk does not leave a fault pending.  NOTE(review):
 * the "raddr" declaration, the 31-bit masking branch body, and the
 * return statement are on elided lines.
 */
473 hwaddr
s390_cpu_get_phys_page_debug(CPUState
*cs
, vaddr vaddr
)
475 S390CPU
*cpu
= S390_CPU(cs
);
476 CPUS390XState
*env
= &cpu
->env
;
478 int prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
/* Remember the pending exception so we can restore it below. */
479 int old_exc
= cs
->exception_index
;
480 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
483 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
487 mmu_translate(env
, vaddr
, 2, asc
, &raddr
, &prot
);
488 cs
->exception_index
= old_exc
;
/*
 * Debugger byte-address lookup: resolve the page then re-attach the
 * in-page offset.  NOTE(review): the declarations of "page"/"phys_addr"
 * and the return statement are on elided lines.
 */
493 hwaddr
s390_cpu_get_phys_addr_debug(CPUState
*cs
, vaddr vaddr
)
498 page
= vaddr
& TARGET_PAGE_MASK
;
499 phys_addr
= cpu_get_phys_page_debug(cs
, page
);
500 phys_addr
+= (vaddr
& ~TARGET_PAGE_MASK
);
/*
 * Install a new PSW: set address and mask, and mirror the CC field
 * (mask bits 44-45) into env->cc_op.  If the new PSW has the WAIT bit
 * set, halt the CPU; when s390_cpu_halt() reports no running CPUs
 * remain, request a system shutdown (system emulation only).
 * NOTE(review): the closing braces / #endif of the wait branch are on
 * elided lines.
 */
505 void load_psw(CPUS390XState
*env
, uint64_t mask
, uint64_t addr
)
507 env
->psw
.addr
= addr
;
508 env
->psw
.mask
= mask
;
509 env
->cc_op
= (mask
>> 44) & 3;
511 if (mask
& PSW_MASK_WAIT
) {
512 S390CPU
*cpu
= s390_env_get_cpu(env
);
513 if (s390_cpu_halt(cpu
) == 0) {
514 #ifndef CONFIG_USER_ONLY
515 qemu_system_shutdown_request();
/*
 * Return the architected PSW mask with the current condition code
 * folded in: recompute cc_op via calc_cc() and place it in mask bits
 * 44-45.  NOTE(review): the declaration of "r", its initialization from
 * env->psw.mask, the clearing of the old CC field, and the return are
 * on elided lines.
 */
521 static uint64_t get_psw_mask(CPUS390XState
*env
)
525 env
->cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
, env
->cc_vr
);
/* After calc_cc the condition code must be a plain 2-bit value. */
529 assert(!(env
->cc_op
& ~3));
530 r
|= (uint64_t)env
->cc_op
<< 44;
/*
 * Map the guest's lowcore (prefix page) at env->psa for read/write
 * access; aborts if the full structure could not be mapped.  Pair every
 * call with cpu_unmap_lowcore().  NOTE(review): the "lowcore"
 * declaration and the return statement are on elided lines.
 */
535 static LowCore
*cpu_map_lowcore(CPUS390XState
*env
)
537 S390CPU
*cpu
= s390_env_get_cpu(env
);
539 hwaddr len
= sizeof(LowCore
);
541 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, 1);
/* A partial mapping is unusable for interrupt delivery: abort. */
543 if (len
< sizeof(LowCore
)) {
544 cpu_abort(CPU(cpu
), "Could not map lowcore\n");
550 static void cpu_unmap_lowcore(LowCore
*lowcore
)
552 cpu_physical_memory_unmap(lowcore
, sizeof(LowCore
), 1, sizeof(LowCore
));
/*
 * Map guest physical memory while honouring the prefix swap: addresses
 * in the first 8KB map to the lowcore at env->psa, and addresses inside
 * the 8KB prefix area map back to page 0; the mapping length is clamped
 * so it never crosses out of the swapped region.  NOTE(review): the
 * trailing signature parameter (is_write), the declaration/assignment
 * of "start", and the branch structure around the two comments are on
 * elided lines.
 */
555 void *s390_cpu_physical_memory_map(CPUS390XState
*env
, hwaddr addr
, hwaddr
*len
,
560 /* Mind the prefix area. */
562 /* Map the lowcore. */
564 *len
= MIN(*len
, 8192 - addr
);
565 } else if ((addr
>= env
->psa
) && (addr
< env
->psa
+ 8192)) {
566 /* Map the 0 page. */
568 *len
= MIN(*len
, 8192 - start
);
571 return cpu_physical_memory_map(start
, len
, is_write
);
/*
 * Counterpart to s390_cpu_physical_memory_map(): the access length
 * equals the mapped length.  NOTE(review): the trailing signature
 * parameter (is_write) is declared on an elided continuation line; env
 * is unused in the visible body.
 */
574 void s390_cpu_physical_memory_unmap(CPUS390XState
*env
, void *addr
, hwaddr len
,
577 cpu_physical_memory_unmap(addr
, len
, is_write
, len
);
/*
 * Deliver a SUPERVISOR CALL interruption: write the SVC code/ilen and
 * the old PSW (advanced past the SVC instruction) into the lowcore,
 * then load the SVC new PSW.  Lowcore fields are big-endian per the
 * architecture.  NOTE(review): the declarations of "mask"/"addr"/
 * "lowcore" are on elided lines.
 */
580 static void do_svc_interrupt(CPUS390XState
*env
)
585 lowcore
= cpu_map_lowcore(env
);
587 lowcore
->svc_code
= cpu_to_be16(env
->int_svc_code
);
588 lowcore
->svc_ilen
= cpu_to_be16(env
->int_svc_ilen
);
589 lowcore
->svc_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
/* Old PSW points past the SVC instruction that raised the call. */
590 lowcore
->svc_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
+ env
->int_svc_ilen
);
591 mask
= be64_to_cpu(lowcore
->svc_new_psw
.mask
);
592 addr
= be64_to_cpu(lowcore
->svc_new_psw
.addr
);
594 cpu_unmap_lowcore(lowcore
);
596 load_psw(env
, mask
, addr
);
/*
 * Deliver a program interruption: determine the instruction length
 * (re-reading it from the current instruction where env->int_pgm_ilen
 * asked for it, and advancing the PSW for the "LATER_INC" style codes),
 * write code/ilen and the old PSW into the lowcore, then load the
 * program new PSW.  NOTE(review): the declarations of "mask"/"addr"/
 * "lowcore", the switch over ilen (ILEN_LATER / ILEN_LATER_INC cases)
 * that selects between the two get_ilen() calls, and parts of the final
 * DPRINTF argument list are on elided lines.
 */
599 static void do_program_interrupt(CPUS390XState
*env
)
603 int ilen
= env
->int_pgm_ilen
;
607 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
610 ilen
= get_ilen(cpu_ldub_code(env
, env
->psw
.addr
));
/* This variant also advances the PSW past the faulting instruction. */
611 env
->psw
.addr
+= ilen
;
/* s390 instructions are 2, 4 or 6 bytes long. */
614 assert(ilen
== 2 || ilen
== 4 || ilen
== 6);
617 qemu_log_mask(CPU_LOG_INT
, "%s: code=0x%x ilen=%d\n",
618 __func__
, env
->int_pgm_code
, ilen
);
620 lowcore
= cpu_map_lowcore(env
);
622 lowcore
->pgm_ilen
= cpu_to_be16(ilen
);
623 lowcore
->pgm_code
= cpu_to_be16(env
->int_pgm_code
);
624 lowcore
->program_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
625 lowcore
->program_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
626 mask
= be64_to_cpu(lowcore
->program_new_psw
.mask
);
627 addr
= be64_to_cpu(lowcore
->program_new_psw
.addr
);
629 cpu_unmap_lowcore(lowcore
);
631 DPRINTF("%s: %x %x %" PRIx64
" %" PRIx64
"\n", __func__
,
632 env
->int_pgm_code
, ilen
, env
->psw
.mask
,
635 load_psw(env
, mask
, addr
);
638 #define VIRTIO_SUBCODE_64 0x0D00
/*
 * Deliver the next queued external interruption: pop the entry at
 * env->ext_index from ext_queue, write its code/params plus the old PSW
 * into the lowcore (cpu_addr also carries VIRTIO_SUBCODE_64), and load
 * the external new PSW.  Clears the EXT pending bit once the queue is
 * drained.  NOTE(review): the declarations of "mask"/"addr"/"lowcore"/
 * "q" and the decrement of env->ext_index before the "== -1" check are
 * on elided lines.
 */
640 static void do_ext_interrupt(CPUS390XState
*env
)
642 S390CPU
*cpu
= s390_env_get_cpu(env
);
/* Delivering with external interrupts masked is a bug. */
647 if (!(env
->psw
.mask
& PSW_MASK_EXT
)) {
648 cpu_abort(CPU(cpu
), "Ext int w/o ext mask\n");
651 if (env
->ext_index
< 0 || env
->ext_index
> MAX_EXT_QUEUE
) {
652 cpu_abort(CPU(cpu
), "Ext queue overrun: %d\n", env
->ext_index
);
655 q
= &env
->ext_queue
[env
->ext_index
];
656 lowcore
= cpu_map_lowcore(env
);
658 lowcore
->ext_int_code
= cpu_to_be16(q
->code
);
659 lowcore
->ext_params
= cpu_to_be32(q
->param
);
660 lowcore
->ext_params2
= cpu_to_be64(q
->param64
);
661 lowcore
->external_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
662 lowcore
->external_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
663 lowcore
->cpu_addr
= cpu_to_be16(env
->cpu_num
| VIRTIO_SUBCODE_64
);
664 mask
= be64_to_cpu(lowcore
->external_new_psw
.mask
);
665 addr
= be64_to_cpu(lowcore
->external_new_psw
.addr
);
667 cpu_unmap_lowcore(lowcore
);
/* Queue empty: no further external interrupts are pending. */
670 if (env
->ext_index
== -1) {
671 env
->pending_int
&= ~INTERRUPT_EXT
;
674 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
675 env
->psw
.mask
, env
->psw
.addr
);
677 load_psw(env
, mask
, addr
);
/*
 * Deliver one pending I/O interruption: scan the per-ISC queues, skip
 * empty ISCs and ISCs not enabled in CR6, and for the first deliverable
 * entry write subchannel id/nr, parm and word plus the old PSW into the
 * lowcore, then load the I/O new PSW.  The INTERRUPT_IO pending bit is
 * only cleared once no queue has entries left.  NOTE(review): the
 * declarations of "isc"/"isc_bits"/"q"/"lowcore"/"mask"/"addr", the
 * "disable"/"found" control flags and loop exits, and several closing
 * braces are on elided lines.
 */
680 static void do_io_interrupt(CPUS390XState
*env
)
682 S390CPU
*cpu
= s390_env_get_cpu(env
);
689 if (!(env
->psw
.mask
& PSW_MASK_IO
)) {
690 cpu_abort(CPU(cpu
), "I/O int w/o I/O mask\n");
/* Walk all interruption subclasses looking for a deliverable entry. */
693 for (isc
= 0; isc
< ARRAY_SIZE(env
->io_index
); isc
++) {
696 if (env
->io_index
[isc
] < 0) {
699 if (env
->io_index
[isc
] > MAX_IO_QUEUE
) {
700 cpu_abort(CPU(cpu
), "I/O queue overrun for isc %d: %d\n",
701 isc
, env
->io_index
[isc
]);
704 q
= &env
->io_queue
[env
->io_index
[isc
]][isc
];
705 isc_bits
= ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q
->word
));
/* This subclass is not enabled in CR6: leave it queued. */
706 if (!(env
->cregs
[6] & isc_bits
)) {
714 lowcore
= cpu_map_lowcore(env
);
716 lowcore
->subchannel_id
= cpu_to_be16(q
->id
);
717 lowcore
->subchannel_nr
= cpu_to_be16(q
->nr
);
718 lowcore
->io_int_parm
= cpu_to_be32(q
->parm
);
719 lowcore
->io_int_word
= cpu_to_be32(q
->word
);
720 lowcore
->io_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
721 lowcore
->io_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
722 mask
= be64_to_cpu(lowcore
->io_new_psw
.mask
);
723 addr
= be64_to_cpu(lowcore
->io_new_psw
.addr
);
725 cpu_unmap_lowcore(lowcore
);
/* Consume the delivered entry from this ISC's queue. */
727 env
->io_index
[isc
]--;
729 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
730 env
->psw
.mask
, env
->psw
.addr
);
731 load_psw(env
, mask
, addr
);
733 if (env
->io_index
[isc
] >= 0) {
740 env
->pending_int
&= ~INTERRUPT_IO
;
/*
 * Deliver a machine-check interruption: pop the entry at mchk_index,
 * bail out if CRW machine checks are disabled in CR14, save the full
 * register state (FPRs, GPRs, access and control registers, prefix,
 * FPC, TOD programmable register, CPU timer, clock comparator) into the
 * lowcore save areas, write a fixed CRW-pending interruption code and
 * the old PSW, then load the mcck new PSW.  NOTE(review): the
 * declarations of "q"/"lowcore"/"mask"/"addr"/"i", the q->type check
 * preceding the unknown-type abort, the mchk_index decrement, and
 * several braces are on elided lines.
 */
745 static void do_mchk_interrupt(CPUS390XState
*env
)
747 S390CPU
*cpu
= s390_env_get_cpu(env
);
753 if (!(env
->psw
.mask
& PSW_MASK_MCHECK
)) {
754 cpu_abort(CPU(cpu
), "Machine check w/o mchk mask\n");
757 if (env
->mchk_index
< 0 || env
->mchk_index
> MAX_MCHK_QUEUE
) {
758 cpu_abort(CPU(cpu
), "Mchk queue overrun: %d\n", env
->mchk_index
);
761 q
= &env
->mchk_queue
[env
->mchk_index
];
764 /* Don't know how to handle this... */
765 cpu_abort(CPU(cpu
), "Unknown machine check type %d\n", q
->type
);
767 if (!(env
->cregs
[14] & (1 << 28))) {
768 /* CRW machine checks disabled */
772 lowcore
= cpu_map_lowcore(env
);
/* Save the architected register state into the lowcore save areas. */
774 for (i
= 0; i
< 16; i
++) {
775 lowcore
->floating_pt_save_area
[i
] = cpu_to_be64(env
->fregs
[i
].ll
);
776 lowcore
->gpregs_save_area
[i
] = cpu_to_be64(env
->regs
[i
]);
777 lowcore
->access_regs_save_area
[i
] = cpu_to_be32(env
->aregs
[i
]);
778 lowcore
->cregs_save_area
[i
] = cpu_to_be64(env
->cregs
[i
]);
780 lowcore
->prefixreg_save_area
= cpu_to_be32(env
->psa
);
781 lowcore
->fpt_creg_save_area
= cpu_to_be32(env
->fpc
);
782 lowcore
->tod_progreg_save_area
= cpu_to_be32(env
->todpr
);
/* 64-bit timer values are stored as two big-endian 32-bit halves. */
783 lowcore
->cpu_timer_save_area
[0] = cpu_to_be32(env
->cputm
>> 32);
784 lowcore
->cpu_timer_save_area
[1] = cpu_to_be32((uint32_t)env
->cputm
);
785 lowcore
->clock_comp_save_area
[0] = cpu_to_be32(env
->ckc
>> 32);
786 lowcore
->clock_comp_save_area
[1] = cpu_to_be32((uint32_t)env
->ckc
);
788 lowcore
->mcck_interruption_code
[0] = cpu_to_be32(0x00400f1d);
789 lowcore
->mcck_interruption_code
[1] = cpu_to_be32(0x40330000);
790 lowcore
->mcck_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
791 lowcore
->mcck_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
792 mask
= be64_to_cpu(lowcore
->mcck_new_psw
.mask
);
793 addr
= be64_to_cpu(lowcore
->mcck_new_psw
.addr
);
795 cpu_unmap_lowcore(lowcore
);
/* Queue drained: clear the machine-check pending bit. */
798 if (env
->mchk_index
== -1) {
799 env
->pending_int
&= ~INTERRUPT_MCHK
;
802 DPRINTF("%s: %" PRIx64
" %" PRIx64
"\n", __func__
,
803 env
->psw
.mask
, env
->psw
.addr
);
805 load_psw(env
, mask
, addr
);
/*
 * System-mode interrupt dispatch: if no exception is already pending,
 * pick one from pending_int in priority order (machine check, external,
 * I/O) subject to the corresponding PSW mask bits, then route the
 * exception_index to the matching do_*_interrupt() helper and clear it.
 * TOD and CPU-timer ticks are converted to queued external interrupts
 * (codes 0x1004 / 0x1005) via cpu_inject_ext().  NOTE(review): the
 * EXCP_* case labels of the dispatch switch and several braces are on
 * elided lines.
 */
808 void s390_cpu_do_interrupt(CPUState
*cs
)
810 S390CPU
*cpu
= S390_CPU(cs
);
811 CPUS390XState
*env
= &cpu
->env
;
813 qemu_log_mask(CPU_LOG_INT
, "%s: %d at pc=%" PRIx64
"\n",
814 __func__
, cs
->exception_index
, env
->psw
.addr
);
816 s390_cpu_set_state(CPU_STATE_OPERATING
, cpu
);
817 /* handle machine checks */
818 if ((env
->psw
.mask
& PSW_MASK_MCHECK
) &&
819 (cs
->exception_index
== -1)) {
820 if (env
->pending_int
& INTERRUPT_MCHK
) {
821 cs
->exception_index
= EXCP_MCHK
;
824 /* handle external interrupts */
825 if ((env
->psw
.mask
& PSW_MASK_EXT
) &&
826 cs
->exception_index
== -1) {
827 if (env
->pending_int
& INTERRUPT_EXT
) {
828 /* code is already in env */
829 cs
->exception_index
= EXCP_EXT
;
830 } else if (env
->pending_int
& INTERRUPT_TOD
) {
831 cpu_inject_ext(cpu
, 0x1004, 0, 0);
832 cs
->exception_index
= EXCP_EXT
;
833 env
->pending_int
&= ~INTERRUPT_EXT
;
834 env
->pending_int
&= ~INTERRUPT_TOD
;
835 } else if (env
->pending_int
& INTERRUPT_CPUTIMER
) {
836 cpu_inject_ext(cpu
, 0x1005, 0, 0);
837 cs
->exception_index
= EXCP_EXT
;
838 env
->pending_int
&= ~INTERRUPT_EXT
;
/* NOTE(review): this branch handles INTERRUPT_CPUTIMER but clears
   INTERRUPT_TOD -- looks like a copy/paste bug that would leave the
   CPU-timer pending bit set forever; confirm against upstream. */
839 env
->pending_int
&= ~INTERRUPT_TOD
;
842 /* handle I/O interrupts */
843 if ((env
->psw
.mask
& PSW_MASK_IO
) &&
844 (cs
->exception_index
== -1)) {
845 if (env
->pending_int
& INTERRUPT_IO
) {
846 cs
->exception_index
= EXCP_IO
;
850 switch (cs
->exception_index
) {
852 do_program_interrupt(env
);
855 do_svc_interrupt(env
);
858 do_ext_interrupt(env
);
861 do_io_interrupt(env
);
864 do_mchk_interrupt(env
);
867 cs
->exception_index
= -1;
/* Nothing pending anymore: drop the hard interrupt request. */
869 if (!env
->pending_int
) {
870 cs
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
/*
 * cpu-exec hook: on a hard interrupt request, deliver it via
 * s390_cpu_do_interrupt() provided external interrupts are enabled in
 * the PSW.  NOTE(review): the "return true"/"return false" statements
 * and closing braces are on elided lines.
 */
874 bool s390_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
876 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
877 S390CPU
*cpu
= S390_CPU(cs
);
878 CPUS390XState
*env
= &cpu
->env
;
880 if (env
->psw
.mask
& PSW_MASK_EXT
) {
881 s390_cpu_do_interrupt(cs
);