 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

22 #include "exec/gdbstub.h"
23 #include "qemu/timer.h"
24 #ifndef CONFIG_USER_ONLY
25 #include "sysemu/sysemu.h"
//#define DEBUG_S390
//#define DEBUG_S390_PTE
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    S390CPU *cpu;
    CPUS390XState *env;
    static int inited;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));
    env = &cpu->env;

    if (tcg_enabled() && !inited) {
        inited = 1;
        s390x_translate_init();
    }

    env->cpu_model_str = cpu_model;
    qemu_init_vcpu(env);
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt(CPUS390XState *env)
{
    env->exception_index = -1;
}

int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong address,
                               int rw, int mmu_idx)
{
    /* fprintf(stderr, "%s: address 0x%lx rw %d mmu_idx %d\n",
       __func__, address, rw, mmu_idx); */
    env->exception_index = EXCP_ADDR;
    /* FIXME: find out how this works on a real machine */
    env->__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
static void trigger_pgm_exception(CPUS390XState *env, uint32_t code,
                                  uint32_t ilc)
{
    env->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilc = ilc;
}

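/*
 * Map an address-space-control (ASC) mode to the translation-exception
 * code bits that the fault helpers below store into the low-core
 * trans_exc_code field.
 */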
static int trans_bits(CPUS390XState *env, uint64_t mode)
{
    int bits = 0;

    switch (mode) {
    case PSW_ASC_PRIMARY:
        bits = 1;
        break;
    case PSW_ASC_SECONDARY:
        bits = 2;
        break;
    case PSW_ASC_HOME:
        bits = 3;
        break;
    default:
        cpu_abort(env, "unknown asc mode\n");
        break;
    }

    return bits;
}

static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
                               uint64_t mode)
{
    int ilc = ILC_LATER_INC_2;
    int bits = trans_bits(env, mode) | 4;

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, PGM_PROTECTION, ilc);
}

static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
                               uint32_t type, uint64_t asc, int rw)
{
    int ilc = ILC_LATER;
    int bits = trans_bits(env, asc);

    /* Code accesses have an undefined ilc.  */
    if (rw == 2) {
        ilc = 2;
    }

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, type, ilc);
}

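/*
 * One step of the DAT table walk: interpret a single region-, segment-
 * or page-table entry.  The walk starts from the ASCE, recurses one
 * table level at a time (level - 4), and at segment level resolves the
 * final PTE into *raddr and the PAGE_* protection bits in *flags.  An
 * invalid entry triggers the matching program exception and returns -1.
 */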
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                              uint64_t asc, uint64_t asce, int level,
                              target_ulong *raddr, int *flags, int rw)
{
    uint64_t offs = 0;
    uint64_t origin;
    uint64_t new_asce;

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, asce);

    if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
        ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
        /* XXX different regions have different faults */
        DPRINTF("%s: invalid region\n", __func__);
        trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
        return -1;
    }

    if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
        return -1;
    }

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */
        *raddr = vaddr;
        return 0;
    }

    origin = asce & _ASCE_ORIGIN;

    switch (level) {
    case _ASCE_TYPE_REGION1 + 4:
        offs = (vaddr >> 50) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION1:
        offs = (vaddr >> 39) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION2:
        offs = (vaddr >> 28) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION3:
        offs = (vaddr >> 17) & 0x3ff8;
        break;
    case _ASCE_TYPE_SEGMENT:
        offs = (vaddr >> 9) & 0x07f8;
        origin = asce & _SEGMENT_ENTRY_ORIGIN;
        break;
    }

    /* XXX region protection flags */
    /* *flags &= ~PAGE_WRITE */

    new_asce = ldq_phys(origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, new_asce);

    if (level != _ASCE_TYPE_SEGMENT) {
        /* yet another region */
        return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
                                  flags, rw);
    }

    /* PTE */
    if (new_asce & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, new_asce);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
        return -1;
    }

    if (new_asce & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = new_asce & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, new_asce);

    return 0;
}

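/*
 * Translate a virtual address for one ASC mode: pick the ASCE from the
 * corresponding control register (CR1/CR7/CR13), check that the address
 * fits the designated table type, then start the table walk with a faked
 * top level.  A write to a read-only mapping raises a protection fault.
 */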
static int mmu_translate_asc(CPUS390XState *env, target_ulong vaddr,
                             uint64_t asc, target_ulong *raddr, int *flags,
                             int rw)
{
    uint64_t asce = 0;
    int level, new_level;
    int r;

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __func__);
        asce = env->cregs[1];
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __func__);
        asce = env->cregs[7];
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __func__);
        asce = env->cregs[13];
        break;
    }

    switch (asce & _ASCE_TYPE_MASK) {
    case _ASCE_TYPE_REGION1:
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffe0000000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xfffffc0000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffffffff80000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    }

    /* fake level above current */
    level = asce & _ASCE_TYPE_MASK;
    new_level = level + 4;
    asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);

    r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);

    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc);
        return -1;
    }

    return r;
}

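/*
 * Main MMU entry point: returns 0 and fills in *raddr and *flags on
 * success, non-zero once a program exception has been queued.  With DAT
 * disabled the address is used as-is; low addresses are relocated via
 * the prefix register (env->psa), and the storage-key reference/change
 * bits are updated for accesses that hit guest RAM.
 */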
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags)
{
    int r = -1;
    uint8_t *sk;

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
    case PSW_ASC_HOME:
        r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
        break;
    case PSW_ASC_SECONDARY:
        /*
         * Instruction: Primary
         * Data: Secondary
         */
        if (rw == 2) {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

 out:
    /* Convert real address -> absolute address */
    if (*raddr < 0x2000) {
        *raddr = *raddr + env->psa;
    }

    if (*raddr <= ram_size) {
        sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];
        if (*flags & PAGE_READ) {
            *sk |= SK_R;
        }

        if (*flags & PAGE_WRITE) {
            *sk |= SK_C;
        }
    }

    return r;
}

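/*
 * TLB fill handler: translate the faulting address, check that it stays
 * within guest RAM (plus the virtio area) and install the mapping with
 * tlb_set_page().  Returns 0 when the TLB was filled, 1 when a program
 * exception was raised instead.
 */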
int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong orig_vaddr,
                               int rw, int mmu_idx)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILC_LATER);
        return 1;
    }

    DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __func__,
            (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(env, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

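/*
 * Debug translation (e.g. for the gdb stub): must not leave a guest
 * exception pending, so the previous exception_index is restored.
 */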
hwaddr cpu_get_phys_page_debug(CPUS390XState *env,
                               target_ulong vaddr)
{
    target_ulong raddr;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
    env->exception_index = old_exc;

    return raddr;
}

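/*
 * Load a new PSW.  A wait PSW halts the CPU; a disabled wait (no I/O,
 * external or machine-check interrupts enabled) on the last running CPU
 * requests a system shutdown.  The condition code is taken from the new
 * mask.
 */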
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    if (mask & PSW_MASK_WAIT) {
        if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) {
            if (s390_del_running_cpu(env) == 0) {
#ifndef CONFIG_USER_ONLY
                qemu_system_shutdown_request();
#endif
            }
        }
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    env->psw.addr = addr;
    env->psw.mask = mask;
    env->cc_op = (mask >> 13) & 3;
}

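/*
 * Recompute the condition code via calc_cc() and fold it into the PSW
 * mask that is stored as the old PSW when an interruption is delivered.
 */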
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);

    assert(!(env->cc_op & ~3));
    r |= env->cc_op << 13;

    return r;
}

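/*
 * Deliver a supervisor-call interruption: store the SVC code/ilc and
 * the old PSW into the low core, then load the SVC new PSW.
 */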
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    hwaddr len = TARGET_PAGE_SIZE;

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilc = cpu_to_be16(env->int_svc_ilc);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + (env->int_svc_ilc));
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    load_psw(env, mask, addr);
}

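/*
 * Deliver a program interruption.  The ILC_LATER* pseudo-values mean the
 * instruction length is determined here from the opcode at psw.addr; the
 * _INC variants also advance the PSW past the current instruction.
 */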
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    hwaddr len = TARGET_PAGE_SIZE;
    int ilc = env->int_pgm_ilc;

    switch (ilc) {
    case ILC_LATER:
        ilc = get_ilc(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILC_LATER_INC:
        ilc = get_ilc(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilc * 2;
        break;
    case ILC_LATER_INC_2:
        ilc = get_ilc(cpu_ldub_code(env, env->psw.addr)) * 2;
        env->psw.addr += ilc;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilc=%d\n",
                  __func__, env->int_pgm_code, ilc);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->pgm_ilc = cpu_to_be16(ilc);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilc, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

#define VIRTIO_SUBCODE_64 0x0D00

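/*
 * Deliver the most recently queued external interruption: store the code
 * and parameters from the ext_queue entry into the low core and load the
 * external new PSW.
 */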
static void do_ext_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    hwaddr len = TARGET_PAGE_SIZE;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(env, "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) {
        cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

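/*
 * Top-level interrupt dispatcher: convert pending external sources
 * (TOD clock comparator 0x1004, CPU timer 0x1005) into EXCP_EXT when
 * external interrupts are enabled in the PSW, then deliver whatever
 * exception is pending.
 */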
void do_interrupt(CPUS390XState *env)
{
    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, env->exception_index, env->psw.addr);

    s390_add_running_cpu(env);
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        env->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            env->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(env, 0x1004, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(env, 0x1005, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }

    switch (env->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    }
    env->exception_index = -1;

    if (!env->pending_int) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

#endif /* CONFIG_USER_ONLY */