4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "exec/gdbstub.h"
25 #include "qemu/timer.h"
26 #include "qemu/qemu-print.h"
27 #include "hw/s390x/ioinst.h"
28 #include "hw/s390x/pv.h"
29 #include "sysemu/hw_accel.h"
30 #include "sysemu/runstate.h"
31 #ifndef CONFIG_USER_ONLY
32 #include "sysemu/tcg.h"
35 #ifndef CONFIG_USER_ONLY
36 void s390x_tod_timer(void *opaque
)
38 cpu_inject_clock_comparator((S390CPU
*) opaque
);
41 void s390x_cpu_timer(void *opaque
)
43 cpu_inject_cpu_timer((S390CPU
*) opaque
);
47 #ifndef CONFIG_USER_ONLY
49 hwaddr
s390_cpu_get_phys_page_debug(CPUState
*cs
, vaddr vaddr
)
51 S390CPU
*cpu
= S390_CPU(cs
);
52 CPUS390XState
*env
= &cpu
->env
;
55 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
59 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
63 /* We want to read the code (e.g., see what we are single-stepping).*/
64 if (asc
!= PSW_ASC_HOME
) {
65 asc
= PSW_ASC_PRIMARY
;
69 * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
72 if (mmu_translate(env
, vaddr
, MMU_DATA_LOAD
, asc
, &raddr
, &prot
, &tec
)) {
78 hwaddr
s390_cpu_get_phys_addr_debug(CPUState
*cs
, vaddr vaddr
)
83 page
= vaddr
& TARGET_PAGE_MASK
;
84 phys_addr
= cpu_get_phys_page_debug(cs
, page
);
85 phys_addr
+= (vaddr
& ~TARGET_PAGE_MASK
);
/*
 * A disabled-wait PSW whose address ends in 0xfff is the "special" wait PSW
 * used to signal a guest-initiated quiesce/shutdown (only the low 12 bits
 * of the address are inspected).
 */
static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    return (psw_addr & 0xfffUL) == 0xfffUL;
}
96 void s390_handle_wait(S390CPU
*cpu
)
98 CPUState
*cs
= CPU(cpu
);
100 if (s390_cpu_halt(cpu
) == 0) {
101 #ifndef CONFIG_USER_ONLY
102 if (is_special_wait_psw(cpu
->env
.psw
.addr
)) {
103 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN
);
105 cpu
->env
.crash_reason
= S390_CRASH_REASON_DISABLED_WAIT
;
106 qemu_system_guest_panicked(cpu_get_crash_info(cs
));
112 void load_psw(CPUS390XState
*env
, uint64_t mask
, uint64_t addr
)
114 uint64_t old_mask
= env
->psw
.mask
;
116 env
->psw
.addr
= addr
;
117 env
->psw
.mask
= mask
;
119 /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
120 if (!tcg_enabled()) {
123 env
->cc_op
= (mask
>> 44) & 3;
125 if ((old_mask
^ mask
) & PSW_MASK_PER
) {
126 s390_cpu_recompute_watchpoints(env_cpu(env
));
129 if (mask
& PSW_MASK_WAIT
) {
130 s390_handle_wait(env_archcpu(env
));
134 uint64_t get_psw_mask(CPUS390XState
*env
)
136 uint64_t r
= env
->psw
.mask
;
139 env
->cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
,
143 assert(!(env
->cc_op
& ~3));
144 r
|= (uint64_t)env
->cc_op
<< 44;
150 LowCore
*cpu_map_lowcore(CPUS390XState
*env
)
153 hwaddr len
= sizeof(LowCore
);
155 lowcore
= cpu_physical_memory_map(env
->psa
, &len
, true);
157 if (len
< sizeof(LowCore
)) {
158 cpu_abort(env_cpu(env
), "Could not map lowcore\n");
164 void cpu_unmap_lowcore(LowCore
*lowcore
)
166 cpu_physical_memory_unmap(lowcore
, sizeof(LowCore
), 1, sizeof(LowCore
));
169 void do_restart_interrupt(CPUS390XState
*env
)
174 lowcore
= cpu_map_lowcore(env
);
176 lowcore
->restart_old_psw
.mask
= cpu_to_be64(get_psw_mask(env
));
177 lowcore
->restart_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
178 mask
= be64_to_cpu(lowcore
->restart_new_psw
.mask
);
179 addr
= be64_to_cpu(lowcore
->restart_new_psw
.addr
);
181 cpu_unmap_lowcore(lowcore
);
182 env
->pending_int
&= ~INTERRUPT_RESTART
;
184 load_psw(env
, mask
, addr
);
187 void s390_cpu_recompute_watchpoints(CPUState
*cs
)
189 const int wp_flags
= BP_CPU
| BP_MEM_WRITE
| BP_STOP_BEFORE_ACCESS
;
190 S390CPU
*cpu
= S390_CPU(cs
);
191 CPUS390XState
*env
= &cpu
->env
;
193 /* We are called when the watchpoints have changed. First
195 cpu_watchpoint_remove_all(cs
, BP_CPU
);
197 /* Return if PER is not enabled */
198 if (!(env
->psw
.mask
& PSW_MASK_PER
)) {
202 /* Return if storage-alteration event is not enabled. */
203 if (!(env
->cregs
[9] & PER_CR9_EVENT_STORE
)) {
207 if (env
->cregs
[10] == 0 && env
->cregs
[11] == -1LL) {
208 /* We can't create a watchoint spanning the whole memory range, so
209 split it in two parts. */
210 cpu_watchpoint_insert(cs
, 0, 1ULL << 63, wp_flags
, NULL
);
211 cpu_watchpoint_insert(cs
, 1ULL << 63, 1ULL << 63, wp_flags
, NULL
);
212 } else if (env
->cregs
[10] > env
->cregs
[11]) {
213 /* The address range loops, create two watchpoints. */
214 cpu_watchpoint_insert(cs
, env
->cregs
[10], -env
->cregs
[10],
216 cpu_watchpoint_insert(cs
, 0, env
->cregs
[11] + 1, wp_flags
, NULL
);
219 /* Default case, create a single watchpoint. */
220 cpu_watchpoint_insert(cs
, env
->cregs
[10],
221 env
->cregs
[11] - env
->cregs
[10] + 1,
226 typedef struct SigpSaveArea
{
227 uint64_t fprs
[16]; /* 0x0000 */
228 uint64_t grs
[16]; /* 0x0080 */
229 PSW psw
; /* 0x0100 */
230 uint8_t pad_0x0110
[0x0118 - 0x0110]; /* 0x0110 */
231 uint32_t prefix
; /* 0x0118 */
232 uint32_t fpc
; /* 0x011c */
233 uint8_t pad_0x0120
[0x0124 - 0x0120]; /* 0x0120 */
234 uint32_t todpr
; /* 0x0124 */
235 uint64_t cputm
; /* 0x0128 */
236 uint64_t ckc
; /* 0x0130 */
237 uint8_t pad_0x0138
[0x0140 - 0x0138]; /* 0x0138 */
238 uint32_t ars
[16]; /* 0x0140 */
239 uint64_t crs
[16]; /* 0x0384 */
241 QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea
) != 512);
243 int s390_store_status(S390CPU
*cpu
, hwaddr addr
, bool store_arch
)
245 static const uint8_t ar_id
= 1;
247 hwaddr len
= sizeof(*sa
);
250 /* For PVMs storing will occur when this cpu enters SIE again */
255 sa
= cpu_physical_memory_map(addr
, &len
, true);
259 if (len
!= sizeof(*sa
)) {
260 cpu_physical_memory_unmap(sa
, len
, 1, 0);
265 cpu_physical_memory_write(offsetof(LowCore
, ar_access_id
), &ar_id
, 1);
267 for (i
= 0; i
< 16; ++i
) {
268 sa
->fprs
[i
] = cpu_to_be64(*get_freg(&cpu
->env
, i
));
270 for (i
= 0; i
< 16; ++i
) {
271 sa
->grs
[i
] = cpu_to_be64(cpu
->env
.regs
[i
]);
273 sa
->psw
.addr
= cpu_to_be64(cpu
->env
.psw
.addr
);
274 sa
->psw
.mask
= cpu_to_be64(get_psw_mask(&cpu
->env
));
275 sa
->prefix
= cpu_to_be32(cpu
->env
.psa
);
276 sa
->fpc
= cpu_to_be32(cpu
->env
.fpc
);
277 sa
->todpr
= cpu_to_be32(cpu
->env
.todpr
);
278 sa
->cputm
= cpu_to_be64(cpu
->env
.cputm
);
279 sa
->ckc
= cpu_to_be64(cpu
->env
.ckc
>> 8);
280 for (i
= 0; i
< 16; ++i
) {
281 sa
->ars
[i
] = cpu_to_be32(cpu
->env
.aregs
[i
]);
283 for (i
= 0; i
< 16; ++i
) {
284 sa
->crs
[i
] = cpu_to_be64(cpu
->env
.cregs
[i
]);
287 cpu_physical_memory_unmap(sa
, len
, 1, len
);
292 typedef struct SigpAdtlSaveArea
{
293 uint64_t vregs
[32][2]; /* 0x0000 */
294 uint8_t pad_0x0200
[0x0400 - 0x0200]; /* 0x0200 */
295 uint64_t gscb
[4]; /* 0x0400 */
296 uint8_t pad_0x0420
[0x1000 - 0x0420]; /* 0x0420 */
298 QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea
) != 4096);
300 #define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
301 int s390_store_adtl_status(S390CPU
*cpu
, hwaddr addr
, hwaddr len
)
303 SigpAdtlSaveArea
*sa
;
307 sa
= cpu_physical_memory_map(addr
, &save
, true);
312 cpu_physical_memory_unmap(sa
, len
, 1, 0);
316 if (s390_has_feat(S390_FEAT_VECTOR
)) {
317 for (i
= 0; i
< 32; i
++) {
318 sa
->vregs
[i
][0] = cpu_to_be64(cpu
->env
.vregs
[i
][0]);
319 sa
->vregs
[i
][1] = cpu_to_be64(cpu
->env
.vregs
[i
][1]);
322 if (s390_has_feat(S390_FEAT_GUARDED_STORAGE
) && len
>= ADTL_GS_MIN_SIZE
) {
323 for (i
= 0; i
< 4; i
++) {
324 sa
->gscb
[i
] = cpu_to_be64(cpu
->env
.gscb
[i
]);
328 cpu_physical_memory_unmap(sa
, len
, 1, len
);
331 #endif /* CONFIG_USER_ONLY */
333 void s390_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
335 S390CPU
*cpu
= S390_CPU(cs
);
336 CPUS390XState
*env
= &cpu
->env
;
339 if (env
->cc_op
> 3) {
340 qemu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %15s\n",
341 env
->psw
.mask
, env
->psw
.addr
, cc_name(env
->cc_op
));
343 qemu_fprintf(f
, "PSW=mask %016" PRIx64
" addr %016" PRIx64
" cc %02x\n",
344 env
->psw
.mask
, env
->psw
.addr
, env
->cc_op
);
347 for (i
= 0; i
< 16; i
++) {
348 qemu_fprintf(f
, "R%02d=%016" PRIx64
, i
, env
->regs
[i
]);
350 qemu_fprintf(f
, "\n");
352 qemu_fprintf(f
, " ");
356 if (flags
& CPU_DUMP_FPU
) {
357 if (s390_has_feat(S390_FEAT_VECTOR
)) {
358 for (i
= 0; i
< 32; i
++) {
359 qemu_fprintf(f
, "V%02d=%016" PRIx64
"%016" PRIx64
"%c",
360 i
, env
->vregs
[i
][0], env
->vregs
[i
][1],
364 for (i
= 0; i
< 16; i
++) {
365 qemu_fprintf(f
, "F%02d=%016" PRIx64
"%c",
366 i
, *get_freg(env
, i
),
367 (i
% 4) == 3 ? '\n' : ' ');
372 #ifndef CONFIG_USER_ONLY
373 for (i
= 0; i
< 16; i
++) {
374 qemu_fprintf(f
, "C%02d=%016" PRIx64
, i
, env
->cregs
[i
]);
376 qemu_fprintf(f
, "\n");
378 qemu_fprintf(f
, " ");
383 #ifdef DEBUG_INLINE_BRANCHES
384 for (i
= 0; i
< CC_OP_MAX
; i
++) {
385 qemu_fprintf(f
, " %15s = %10ld\t%10ld\n", cc_name(i
),
386 inline_branch_miss
[i
], inline_branch_hit
[i
]);
390 qemu_fprintf(f
, "\n");
393 const char *cc_name(enum cc_op cc_op
)
395 static const char * const cc_names
[] = {
396 [CC_OP_CONST0
] = "CC_OP_CONST0",
397 [CC_OP_CONST1
] = "CC_OP_CONST1",
398 [CC_OP_CONST2
] = "CC_OP_CONST2",
399 [CC_OP_CONST3
] = "CC_OP_CONST3",
400 [CC_OP_DYNAMIC
] = "CC_OP_DYNAMIC",
401 [CC_OP_STATIC
] = "CC_OP_STATIC",
402 [CC_OP_NZ
] = "CC_OP_NZ",
403 [CC_OP_LTGT_32
] = "CC_OP_LTGT_32",
404 [CC_OP_LTGT_64
] = "CC_OP_LTGT_64",
405 [CC_OP_LTUGTU_32
] = "CC_OP_LTUGTU_32",
406 [CC_OP_LTUGTU_64
] = "CC_OP_LTUGTU_64",
407 [CC_OP_LTGT0_32
] = "CC_OP_LTGT0_32",
408 [CC_OP_LTGT0_64
] = "CC_OP_LTGT0_64",
409 [CC_OP_ADD_64
] = "CC_OP_ADD_64",
410 [CC_OP_ADDU_64
] = "CC_OP_ADDU_64",
411 [CC_OP_ADDC_64
] = "CC_OP_ADDC_64",
412 [CC_OP_SUB_64
] = "CC_OP_SUB_64",
413 [CC_OP_SUBU_64
] = "CC_OP_SUBU_64",
414 [CC_OP_SUBB_64
] = "CC_OP_SUBB_64",
415 [CC_OP_ABS_64
] = "CC_OP_ABS_64",
416 [CC_OP_NABS_64
] = "CC_OP_NABS_64",
417 [CC_OP_ADD_32
] = "CC_OP_ADD_32",
418 [CC_OP_ADDU_32
] = "CC_OP_ADDU_32",
419 [CC_OP_ADDC_32
] = "CC_OP_ADDC_32",
420 [CC_OP_SUB_32
] = "CC_OP_SUB_32",
421 [CC_OP_SUBU_32
] = "CC_OP_SUBU_32",
422 [CC_OP_SUBB_32
] = "CC_OP_SUBB_32",
423 [CC_OP_ABS_32
] = "CC_OP_ABS_32",
424 [CC_OP_NABS_32
] = "CC_OP_NABS_32",
425 [CC_OP_COMP_32
] = "CC_OP_COMP_32",
426 [CC_OP_COMP_64
] = "CC_OP_COMP_64",
427 [CC_OP_TM_32
] = "CC_OP_TM_32",
428 [CC_OP_TM_64
] = "CC_OP_TM_64",
429 [CC_OP_NZ_F32
] = "CC_OP_NZ_F32",
430 [CC_OP_NZ_F64
] = "CC_OP_NZ_F64",
431 [CC_OP_NZ_F128
] = "CC_OP_NZ_F128",
432 [CC_OP_ICM
] = "CC_OP_ICM",
433 [CC_OP_SLA_32
] = "CC_OP_SLA_32",
434 [CC_OP_SLA_64
] = "CC_OP_SLA_64",
435 [CC_OP_FLOGR
] = "CC_OP_FLOGR",
436 [CC_OP_LCBB
] = "CC_OP_LCBB",
437 [CC_OP_VC
] = "CC_OP_VC",
440 return cc_names
[cc_op
];