s390x/kvm: drop KVMState parameter from kvm_s390_set_mem_limit()
[qemu.git] / target / s390x / cpu.h
/*
 * S/390 virtual CPU header
 *
 * Copyright (c) 2009 Ulrich Hecht
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef S390X_CPU_H
#define S390X_CPU_H

#include "qemu-common.h"
#include "cpu-qom.h"

#define TARGET_LONG_BITS 64

#define ELF_MACHINE_UNAME "S390X"

#define CPUArchState struct CPUS390XState

#include "exec/cpu-defs.h"
#define TARGET_PAGE_BITS 12

#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64

#include "exec/cpu-all.h"

#include "fpu/softfloat.h"

#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1

#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
#define MMU_MODE2_SUFFIX _home

#define MMU_USER_IDX 0

#define MAX_EXT_QUEUE 16
#define MAX_IO_QUEUE 16
#define MAX_MCHK_QUEUE 16

#define PSW_MCHK_MASK 0x0004000000000000
#define PSW_IO_MASK 0x0200000000000000

typedef struct PSW {
    uint64_t mask;
    uint64_t addr;
} PSW;

typedef struct ExtQueue {
    uint32_t code;
    uint32_t param;
    uint32_t param64;
} ExtQueue;

typedef struct IOIntQueue {
    uint16_t id;
    uint16_t nr;
    uint32_t parm;
    uint32_t word;
} IOIntQueue;

typedef struct MchkQueue {
    uint16_t type;
} MchkQueue;

typedef struct CPUS390XState {
    uint64_t regs[16];     /* GP registers */
    /*
     * The floating point registers are part of the vector registers.
     * vregs[0][0] -> vregs[15][0] are 16 floating point registers
     */
    CPU_DoubleU vregs[32][2];  /* vector registers */
    uint32_t aregs[16];    /* access registers */
    uint8_t riccb[64];     /* runtime instrumentation control */
    uint64_t gscb[4];      /* guarded storage control */

    /* Fields up to this point are not cleared by initial CPU reset */
    struct {} start_initial_reset_fields;

    uint32_t fpc;          /* floating-point control register */
    uint32_t cc_op;

    float_status fpu_status; /* passed to softfloat lib */

    /* The low part of a 128-bit return, or remainder of a divide. */
    uint64_t retxl;

    PSW psw;

    uint64_t cc_src;
    uint64_t cc_dst;
    uint64_t cc_vr;

    uint64_t ex_value;

    uint64_t __excp_addr;
    uint64_t psa;

    uint32_t int_pgm_code;
    uint32_t int_pgm_ilen;

    uint32_t int_svc_code;
    uint32_t int_svc_ilen;

    uint64_t per_address;
    uint16_t per_perc_atmid;

    uint64_t cregs[16]; /* control registers */

    ExtQueue ext_queue[MAX_EXT_QUEUE];
    IOIntQueue io_queue[MAX_IO_QUEUE][8];
    MchkQueue mchk_queue[MAX_MCHK_QUEUE];

    int pending_int;
    int ext_index;
    int io_index[8];
    int mchk_index;

    uint64_t ckc;
    uint64_t cputm;
    uint32_t todpr;

    uint64_t pfault_token;
    uint64_t pfault_compare;
    uint64_t pfault_select;

    uint64_t gbea;
    uint64_t pp;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    CPU_COMMON

    uint32_t cpu_num;
    uint64_t cpuid;

    uint64_t tod_offset;
    uint64_t tod_basetime;
    QEMUTimer *tod_timer;

    QEMUTimer *cpu_timer;

    /*
     * The cpu state represents the logical state of a cpu. In contrast to other
     * architectures, there is a difference between a halt and a stop on s390.
     * If all cpus are either stopped (including check stop) or in the disabled
     * wait state, the vm can be shut down.
     */
#define CPU_STATE_UNINITIALIZED 0x00
#define CPU_STATE_STOPPED       0x01
#define CPU_STATE_CHECK_STOP    0x02
#define CPU_STATE_OPERATING     0x03
#define CPU_STATE_LOAD          0x04
    uint8_t cpu_state;

    /* currently processed sigp order */
    uint8_t sigp_order;

} CPUS390XState;

static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
{
    return &cs->vregs[nr][0];
}
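
/*
 * Illustrative sketch, not part of the original header: because the 16
 * floating point registers overlay element 0 of vector registers 0..15,
 * a hypothetical helper could read an FP register as a raw 64-bit image
 * through get_freg() above (this assumes CPU_DoubleU exposes the raw
 * 64-bit value as its .ll member).
 */
static inline uint64_t example_read_fpr_raw(CPUS390XState *env, int nr)
{
    /* hypothetical helper; expects 0 <= nr <= 15 */
    return get_freg(env, nr)->ll;
}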
/**
 * S390CPU:
 * @env: #CPUS390XState.
 *
 * An S/390 CPU.
 */
struct S390CPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUS390XState env;
    int64_t id;
    S390CPUModel *model;
    /* needed for live migration */
    void *irqstate;
    uint32_t irqstate_saved_size;
};

static inline S390CPU *s390_env_get_cpu(CPUS390XState *env)
{
    return container_of(env, S390CPU, env);
}

#define ENV_GET_CPU(e) CPU(s390_env_get_cpu(e))

#define ENV_OFFSET offsetof(S390CPU, env)

#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_s390_cpu;
#endif

void s390_cpu_do_interrupt(CPUState *cpu);
bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
void s390_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);
int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                              int cpuid, void *opaque);

hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void s390_cpu_gdb_init(CPUState *cs);
void s390x_cpu_debug_excp_handler(CPUState *cs);

#include "sysemu/kvm.h"

/* distinguish between 24 bit and 31 bit addressing */
#define HIGH_ORDER_BIT 0x80000000

/* Interrupt Codes */
/* Program Interrupts */
#define PGM_OPERATION 0x0001
#define PGM_PRIVILEGED 0x0002
#define PGM_EXECUTE 0x0003
#define PGM_PROTECTION 0x0004
#define PGM_ADDRESSING 0x0005
#define PGM_SPECIFICATION 0x0006
#define PGM_DATA 0x0007
#define PGM_FIXPT_OVERFLOW 0x0008
#define PGM_FIXPT_DIVIDE 0x0009
#define PGM_DEC_OVERFLOW 0x000a
#define PGM_DEC_DIVIDE 0x000b
#define PGM_HFP_EXP_OVERFLOW 0x000c
#define PGM_HFP_EXP_UNDERFLOW 0x000d
#define PGM_HFP_SIGNIFICANCE 0x000e
#define PGM_HFP_DIVIDE 0x000f
#define PGM_SEGMENT_TRANS 0x0010
#define PGM_PAGE_TRANS 0x0011
#define PGM_TRANS_SPEC 0x0012
#define PGM_SPECIAL_OP 0x0013
#define PGM_OPERAND 0x0015
#define PGM_TRACE_TABLE 0x0016
#define PGM_SPACE_SWITCH 0x001c
#define PGM_HFP_SQRT 0x001d
#define PGM_PC_TRANS_SPEC 0x001f
#define PGM_AFX_TRANS 0x0020
#define PGM_ASX_TRANS 0x0021
#define PGM_LX_TRANS 0x0022
#define PGM_EX_TRANS 0x0023
#define PGM_PRIM_AUTH 0x0024
#define PGM_SEC_AUTH 0x0025
#define PGM_ALET_SPEC 0x0028
#define PGM_ALEN_SPEC 0x0029
#define PGM_ALE_SEQ 0x002a
#define PGM_ASTE_VALID 0x002b
#define PGM_ASTE_SEQ 0x002c
#define PGM_EXT_AUTH 0x002d
#define PGM_STACK_FULL 0x0030
#define PGM_STACK_EMPTY 0x0031
#define PGM_STACK_SPEC 0x0032
#define PGM_STACK_TYPE 0x0033
#define PGM_STACK_OP 0x0034
#define PGM_ASCE_TYPE 0x0038
#define PGM_REG_FIRST_TRANS 0x0039
#define PGM_REG_SEC_TRANS 0x003a
#define PGM_REG_THIRD_TRANS 0x003b
#define PGM_MONITOR 0x0040
#define PGM_PER 0x0080
#define PGM_CRYPTO 0x0119

/* External Interrupts */
#define EXT_INTERRUPT_KEY 0x0040
#define EXT_CLOCK_COMP 0x1004
#define EXT_CPU_TIMER 0x1005
#define EXT_MALFUNCTION 0x1200
#define EXT_EMERGENCY 0x1201
#define EXT_EXTERNAL_CALL 0x1202
#define EXT_ETR 0x1406
#define EXT_SERVICE 0x2401
#define EXT_VIRTIO 0x2603

/* PSW defines */
#undef PSW_MASK_PER
#undef PSW_MASK_DAT
#undef PSW_MASK_IO
#undef PSW_MASK_EXT
#undef PSW_MASK_KEY
#undef PSW_SHIFT_KEY
#undef PSW_MASK_MCHECK
#undef PSW_MASK_WAIT
#undef PSW_MASK_PSTATE
#undef PSW_MASK_ASC
#undef PSW_SHIFT_ASC
#undef PSW_MASK_CC
#undef PSW_MASK_PM
#undef PSW_MASK_64
#undef PSW_MASK_32
#undef PSW_MASK_ESA_ADDR

#define PSW_MASK_PER 0x4000000000000000ULL
#define PSW_MASK_DAT 0x0400000000000000ULL
#define PSW_MASK_IO 0x0200000000000000ULL
#define PSW_MASK_EXT 0x0100000000000000ULL
#define PSW_MASK_KEY 0x00F0000000000000ULL
#define PSW_SHIFT_KEY 52
#define PSW_MASK_MCHECK 0x0004000000000000ULL
#define PSW_MASK_WAIT 0x0002000000000000ULL
#define PSW_MASK_PSTATE 0x0001000000000000ULL
#define PSW_MASK_ASC 0x0000C00000000000ULL
#define PSW_SHIFT_ASC 46
#define PSW_MASK_CC 0x0000300000000000ULL
#define PSW_MASK_PM 0x00000F0000000000ULL
#define PSW_MASK_64 0x0000000100000000ULL
#define PSW_MASK_32 0x0000000080000000ULL
#define PSW_MASK_ESA_ADDR 0x000000007fffffffULL

#undef PSW_ASC_PRIMARY
#undef PSW_ASC_ACCREG
#undef PSW_ASC_SECONDARY
#undef PSW_ASC_HOME

#define PSW_ASC_PRIMARY 0x0000000000000000ULL
#define PSW_ASC_ACCREG 0x0000400000000000ULL
#define PSW_ASC_SECONDARY 0x0000800000000000ULL
#define PSW_ASC_HOME 0x0000C00000000000ULL

/* the address space values shifted */
#define AS_PRIMARY 0
#define AS_ACCREG 1
#define AS_SECONDARY 2
#define AS_HOME 3

/* tb flags */

#define FLAG_MASK_PSW_SHIFT 31
#define FLAG_MASK_PER (PSW_MASK_PER >> FLAG_MASK_PSW_SHIFT)
#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT)
#define FLAG_MASK_ASC (PSW_MASK_ASC >> FLAG_MASK_PSW_SHIFT)
#define FLAG_MASK_64 (PSW_MASK_64 >> FLAG_MASK_PSW_SHIFT)
#define FLAG_MASK_32 (PSW_MASK_32 >> FLAG_MASK_PSW_SHIFT)
#define FLAG_MASK_PSW (FLAG_MASK_PER | FLAG_MASK_PSTATE \
                       | FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32)

/* Control register 0 bits */
#define CR0_LOWPROT 0x0000000010000000ULL
#define CR0_SECONDARY 0x0000000004000000ULL
#define CR0_EDAT 0x0000000000800000ULL

/* MMU */
#define MMU_PRIMARY_IDX 0
#define MMU_SECONDARY_IDX 1
#define MMU_HOME_IDX 2

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
{
    switch (env->psw.mask & PSW_MASK_ASC) {
    case PSW_ASC_PRIMARY:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME:
        return MMU_HOME_IDX;
    case PSW_ASC_ACCREG:
        /* Fallthrough: access register mode is not yet supported */
    default:
        abort();
    }
}

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->psw.addr;
    *cs_base = env->ex_value;
    *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
}

#define MAX_ILEN 6
/* While the PoO talks about ILC (a number between 1-3), what is actually
   stored in LowCore is shifted left one bit (an even number between 2 and 6).
   As this is the actual length of the insn and therefore more useful, that
   is what we want to pass around and manipulate.  To make sure that we have
   applied this distinction universally, rename the "ILC" to "ILEN".  */
static inline int get_ilen(uint8_t opc)
{
    switch (opc >> 6) {
    case 0:
        return 2;
    case 1:
    case 2:
        return 4;
    default:
        return 6;
    }
}
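
/*
 * Illustrative sketch, not part of the original header (the opcode values
 * below are just examples): the two top bits of the first opcode byte
 * encode the instruction length, so 0x07 (BCR) is 2 bytes, 0x58 (L) is
 * 4 bytes and 0xe3 (an RXY-format prefix byte) is 6 bytes.
 */
static inline bool example_ilen_is_consistent(void)
{
    return get_ilen(0x07) == 2 && get_ilen(0x58) == 4 && get_ilen(0xe3) == 6;
}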
/* PER bits from control register 9 */
#define PER_CR9_EVENT_BRANCH 0x80000000
#define PER_CR9_EVENT_IFETCH 0x40000000
#define PER_CR9_EVENT_STORE 0x20000000
#define PER_CR9_EVENT_STORE_REAL 0x08000000
#define PER_CR9_EVENT_NULLIFICATION 0x01000000
#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
#define PER_CR9_CONTROL_ALTERATION 0x00200000

/* PER bits from the PER CODE/ATMID/AI in lowcore */
#define PER_CODE_EVENT_BRANCH 0x8000
#define PER_CODE_EVENT_IFETCH 0x4000
#define PER_CODE_EVENT_STORE 0x2000
#define PER_CODE_EVENT_STORE_REAL 0x0800
#define PER_CODE_EVENT_NULLIFICATION 0x0100

/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
   entry when a PER exception is triggered.  */
static inline uint8_t get_per_atmid(CPUS390XState *env)
{
    return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
           ((1 << 6)) |
           ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
           ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
           ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
           ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
}

/* Check if an address is within the PER starting address and the PER
   ending address.  The address range might loop.  */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}
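
/*
 * Illustrative sketch, not part of the original header: when CR10 > CR11
 * the PER address range wraps around the top of the address space, so an
 * address is "in range" if it lies above CR10 or below CR11, which is the
 * second case handled by get_per_in_range() above.
 */
static inline bool example_per_range_wraps(CPUS390XState *env)
{
    /* hypothetical helper: true if the configured PER range wraps around */
    return env->cregs[10] > env->cregs[11];
}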
S390CPU *cpu_s390x_init(const char *cpu_model);
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp);
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp);
void s390x_translate_init(void);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_s390x_signal_handler(int host_signum, void *pinfo,
                             void *puc);
int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                              int mmu_idx);


#ifndef CONFIG_USER_ONLY
void do_restart_interrupt(CPUS390XState *env);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr);

static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
                                       uint8_t *ar)
{
    hwaddr addr = 0;
    uint8_t reg;

    reg = ipb >> 28;
    if (reg > 0) {
        addr = env->regs[reg];
    }
    addr += (ipb >> 16) & 0xfff;
    if (ar) {
        *ar = reg;
    }

    return addr;
}
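
/*
 * Illustrative sketch, not part of the original header: as the code above
 * shows, decode_basedisp_s() takes the base register B2 from IPB bits
 * 31..28 and the 12-bit displacement D2 from bits 27..16, computing
 * regs[B2] + D2 (B2 == 0 contributes no base register value).
 */
static inline hwaddr example_decode_operand_address(CPUS390XState *env,
                                                    uint32_t ipb)
{
    uint8_t ar; /* access register number, unused in this sketch */

    return decode_basedisp_s(env, ipb, &ar);
}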
/* Base/displacement are at the same locations. */
#define decode_basedisp_rs decode_basedisp_s

/* helper functions for run_on_cpu() */
static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPUClass *scc = S390_CPU_GET_CLASS(cs);

    scc->cpu_reset(cs);
}
static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
{
    cpu_reset(cs);
}

void s390x_tod_timer(void *opaque);
void s390x_cpu_timer(void *opaque);

int s390_virtio_hypercall(CPUS390XState *env);

#ifdef CONFIG_KVM
void kvm_s390_service_interrupt(uint32_t parm);
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write);
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
#else
static inline void kvm_s390_service_interrupt(uint32_t parm)
{
}
static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    return -ENOSYS;
}
static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    return -ENOSYS;
}
static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
                                  void *hostbuf, int len, bool is_write)
{
    return -ENOSYS;
}
static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
                                             uint64_t te_code)
{
}
#endif

static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    if (kvm_enabled()) {
        return kvm_s390_get_clock(tod_high, tod_low);
    }
    /* Fixme TCG */
    *tod_high = 0;
    *tod_low = 0;
    return 0;
}

static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    if (kvm_enabled()) {
        return kvm_s390_set_clock(tod_high, tod_low);
    }
    /* Fixme TCG */
    return 0;
}

S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
unsigned int s390_cpu_halt(S390CPU *cpu);
void s390_cpu_unhalt(S390CPU *cpu);
unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
{
    return cpu->env.cpu_state;
}

void gtod_save(QEMUFile *f, void *opaque);
int gtod_load(QEMUFile *f, void *opaque, int version_id);

void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
                    uint64_t param64);

/* ioinst.c */
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
                        uint32_t ipb);
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1);
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1);
/* service interrupts are floating, therefore we must not pass a cpustate */
void s390_sclp_extint(uint32_t parm);

#else
static inline unsigned int s390_cpu_halt(S390CPU *cpu)
{
    return 0;
}

static inline void s390_cpu_unhalt(S390CPU *cpu)
{
}

static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
{
    return 0;
}
#endif

extern void subsystem_reset(void);

#define cpu_init(model) CPU(cpu_s390x_init(model))
#define cpu_signal_handler cpu_s390x_signal_handler

void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list s390_cpu_list
void s390_cpu_model_register_props(Object *obj);
void s390_cpu_model_class_register_props(ObjectClass *oc);
void s390_realize_cpu_model(CPUState *cs, Error **errp);
ObjectClass *s390_cpu_class_by_name(const char *name);

#define EXCP_EXT 1 /* external interrupt */
#define EXCP_SVC 2 /* supervisor call (syscall) */
#define EXCP_PGM 3 /* program interruption */
#define EXCP_IO 7 /* I/O interrupt */
#define EXCP_MCHK 8 /* machine check */

#define INTERRUPT_EXT      (1 << 0)
#define INTERRUPT_TOD      (1 << 1)
#define INTERRUPT_CPUTIMER (1 << 2)
#define INTERRUPT_IO       (1 << 3)
#define INTERRUPT_MCHK     (1 << 4)

/* Program Status Word. */
#define S390_PSWM_REGNUM 0
#define S390_PSWA_REGNUM 1
/* General Purpose Registers. */
#define S390_R0_REGNUM 2
#define S390_R1_REGNUM 3
#define S390_R2_REGNUM 4
#define S390_R3_REGNUM 5
#define S390_R4_REGNUM 6
#define S390_R5_REGNUM 7
#define S390_R6_REGNUM 8
#define S390_R7_REGNUM 9
#define S390_R8_REGNUM 10
#define S390_R9_REGNUM 11
#define S390_R10_REGNUM 12
#define S390_R11_REGNUM 13
#define S390_R12_REGNUM 14
#define S390_R13_REGNUM 15
#define S390_R14_REGNUM 16
#define S390_R15_REGNUM 17
/* Total Core Registers. */
#define S390_NUM_CORE_REGS 18

/* CC optimization */
/* Instead of computing the condition codes after each s390x instruction,
 * QEMU just stores the result (called CC_DST), the type of operation
 * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
 * CC_VR). When the condition codes are needed, the condition codes can
 * be calculated using this information. Condition codes are not generated
 * if they are only needed for conditional branches.
 */
enum cc_op {
    CC_OP_CONST0 = 0,           /* CC is 0 */
    CC_OP_CONST1,               /* CC is 1 */
    CC_OP_CONST2,               /* CC is 2 */
    CC_OP_CONST3,               /* CC is 3 */

    CC_OP_DYNAMIC,              /* CC calculation defined by env->cc_op */
    CC_OP_STATIC,               /* CC value is env->cc_op */

    CC_OP_NZ,                   /* env->cc_dst != 0 */
    CC_OP_LTGT_32,              /* signed less/greater than (32bit) */
    CC_OP_LTGT_64,              /* signed less/greater than (64bit) */
    CC_OP_LTUGTU_32,            /* unsigned less/greater than (32bit) */
    CC_OP_LTUGTU_64,            /* unsigned less/greater than (64bit) */
    CC_OP_LTGT0_32,             /* signed less/greater than 0 (32bit) */
    CC_OP_LTGT0_64,             /* signed less/greater than 0 (64bit) */

    CC_OP_ADD_64,               /* overflow on add (64bit) */
    CC_OP_ADDU_64,              /* overflow on unsigned add (64bit) */
    CC_OP_ADDC_64,              /* overflow on unsigned add-carry (64bit) */
    CC_OP_SUB_64,               /* overflow on subtraction (64bit) */
    CC_OP_SUBU_64,              /* overflow on unsigned subtraction (64bit) */
    CC_OP_SUBB_64,              /* overflow on unsigned sub-borrow (64bit) */
    CC_OP_ABS_64,               /* sign eval on abs (64bit) */
    CC_OP_NABS_64,              /* sign eval on nabs (64bit) */

    CC_OP_ADD_32,               /* overflow on add (32bit) */
    CC_OP_ADDU_32,              /* overflow on unsigned add (32bit) */
    CC_OP_ADDC_32,              /* overflow on unsigned add-carry (32bit) */
    CC_OP_SUB_32,               /* overflow on subtraction (32bit) */
    CC_OP_SUBU_32,              /* overflow on unsigned subtraction (32bit) */
    CC_OP_SUBB_32,              /* overflow on unsigned sub-borrow (32bit) */
    CC_OP_ABS_32,               /* sign eval on abs (64bit) */
    CC_OP_NABS_32,              /* sign eval on nabs (64bit) */

    CC_OP_COMP_32,              /* complement */
    CC_OP_COMP_64,              /* complement */

    CC_OP_TM_32,                /* test under mask (32bit) */
    CC_OP_TM_64,                /* test under mask (64bit) */

    CC_OP_NZ_F32,               /* FP dst != 0 (32bit) */
    CC_OP_NZ_F64,               /* FP dst != 0 (64bit) */
    CC_OP_NZ_F128,              /* FP dst != 0 (128bit) */

    CC_OP_ICM,                  /* insert characters under mask */
    CC_OP_SLA_32,               /* Calculate shift left signed (32bit) */
    CC_OP_SLA_64,               /* Calculate shift left signed (64bit) */
    CC_OP_FLOGR,                /* find leftmost one */
    CC_OP_MAX
};

static const char *cc_names[] = {
    [CC_OP_CONST0]    = "CC_OP_CONST0",
    [CC_OP_CONST1]    = "CC_OP_CONST1",
    [CC_OP_CONST2]    = "CC_OP_CONST2",
    [CC_OP_CONST3]    = "CC_OP_CONST3",
    [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
    [CC_OP_STATIC]    = "CC_OP_STATIC",
    [CC_OP_NZ]        = "CC_OP_NZ",
    [CC_OP_LTGT_32]   = "CC_OP_LTGT_32",
    [CC_OP_LTGT_64]   = "CC_OP_LTGT_64",
    [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
    [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
    [CC_OP_LTGT0_32]  = "CC_OP_LTGT0_32",
    [CC_OP_LTGT0_64]  = "CC_OP_LTGT0_64",
    [CC_OP_ADD_64]    = "CC_OP_ADD_64",
    [CC_OP_ADDU_64]   = "CC_OP_ADDU_64",
    [CC_OP_ADDC_64]   = "CC_OP_ADDC_64",
    [CC_OP_SUB_64]    = "CC_OP_SUB_64",
    [CC_OP_SUBU_64]   = "CC_OP_SUBU_64",
    [CC_OP_SUBB_64]   = "CC_OP_SUBB_64",
    [CC_OP_ABS_64]    = "CC_OP_ABS_64",
    [CC_OP_NABS_64]   = "CC_OP_NABS_64",
    [CC_OP_ADD_32]    = "CC_OP_ADD_32",
    [CC_OP_ADDU_32]   = "CC_OP_ADDU_32",
    [CC_OP_ADDC_32]   = "CC_OP_ADDC_32",
    [CC_OP_SUB_32]    = "CC_OP_SUB_32",
    [CC_OP_SUBU_32]   = "CC_OP_SUBU_32",
    [CC_OP_SUBB_32]   = "CC_OP_SUBB_32",
    [CC_OP_ABS_32]    = "CC_OP_ABS_32",
    [CC_OP_NABS_32]   = "CC_OP_NABS_32",
    [CC_OP_COMP_32]   = "CC_OP_COMP_32",
    [CC_OP_COMP_64]   = "CC_OP_COMP_64",
    [CC_OP_TM_32]     = "CC_OP_TM_32",
    [CC_OP_TM_64]     = "CC_OP_TM_64",
    [CC_OP_NZ_F32]    = "CC_OP_NZ_F32",
    [CC_OP_NZ_F64]    = "CC_OP_NZ_F64",
    [CC_OP_NZ_F128]   = "CC_OP_NZ_F128",
    [CC_OP_ICM]       = "CC_OP_ICM",
    [CC_OP_SLA_32]    = "CC_OP_SLA_32",
    [CC_OP_SLA_64]    = "CC_OP_SLA_64",
    [CC_OP_FLOGR]     = "CC_OP_FLOGR",
};

static inline const char *cc_name(int cc_op)
{
    return cc_names[cc_op];
}

static inline void setcc(S390CPU *cpu, uint64_t cc)
{
    CPUS390XState *env = &cpu->env;

    env->psw.mask &= ~(3ull << 44);
    env->psw.mask |= (cc & 3) << 44;
    env->cc_op = cc;
}
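
/*
 * Illustrative sketch, not part of the original header: setcc() above
 * stores the condition code in PSW bits 18-19 (bits 44-45 of the 64-bit
 * mask counted from the least significant bit) and mirrors it in
 * env->cc_op, so the value can be recovered from the PSW mask directly.
 */
static inline uint64_t example_psw_cc(S390CPU *cpu)
{
    /* hypothetical helper: extract the condition code from the PSW mask */
    return (cpu->env.psw.mask >> 44) & 3;
}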
#ifndef CONFIG_USER_ONLY

typedef struct LowCore
{
    /* prefix area: defined by architecture */
    uint32_t ccw1[2];                   /* 0x000 */
    uint32_t ccw2[4];                   /* 0x008 */
    uint8_t pad1[0x80-0x18];            /* 0x018 */
    uint32_t ext_params;                /* 0x080 */
    uint16_t cpu_addr;                  /* 0x084 */
    uint16_t ext_int_code;              /* 0x086 */
    uint16_t svc_ilen;                  /* 0x088 */
    uint16_t svc_code;                  /* 0x08a */
    uint16_t pgm_ilen;                  /* 0x08c */
    uint16_t pgm_code;                  /* 0x08e */
    uint32_t data_exc_code;             /* 0x090 */
    uint16_t mon_class_num;             /* 0x094 */
    uint16_t per_perc_atmid;            /* 0x096 */
    uint64_t per_address;               /* 0x098 */
    uint8_t exc_access_id;              /* 0x0a0 */
    uint8_t per_access_id;              /* 0x0a1 */
    uint8_t op_access_id;               /* 0x0a2 */
    uint8_t ar_access_id;               /* 0x0a3 */
    uint8_t pad2[0xA8-0xA4];            /* 0x0a4 */
    uint64_t trans_exc_code;            /* 0x0a8 */
    uint64_t monitor_code;              /* 0x0b0 */
    uint16_t subchannel_id;             /* 0x0b8 */
    uint16_t subchannel_nr;             /* 0x0ba */
    uint32_t io_int_parm;               /* 0x0bc */
    uint32_t io_int_word;               /* 0x0c0 */
    uint8_t pad3[0xc8-0xc4];            /* 0x0c4 */
    uint32_t stfl_fac_list;             /* 0x0c8 */
    uint8_t pad4[0xe8-0xcc];            /* 0x0cc */
    uint32_t mcck_interruption_code[2]; /* 0x0e8 */
    uint8_t pad5[0xf4-0xf0];            /* 0x0f0 */
    uint32_t external_damage_code;      /* 0x0f4 */
    uint64_t failing_storage_address;   /* 0x0f8 */
    uint8_t pad6[0x110-0x100];          /* 0x100 */
    uint64_t per_breaking_event_addr;   /* 0x110 */
    uint8_t pad7[0x120-0x118];          /* 0x118 */
    PSW restart_old_psw;                /* 0x120 */
    PSW external_old_psw;               /* 0x130 */
    PSW svc_old_psw;                    /* 0x140 */
    PSW program_old_psw;                /* 0x150 */
    PSW mcck_old_psw;                   /* 0x160 */
    PSW io_old_psw;                     /* 0x170 */
    uint8_t pad8[0x1a0-0x180];          /* 0x180 */
    PSW restart_new_psw;                /* 0x1a0 */
    PSW external_new_psw;               /* 0x1b0 */
    PSW svc_new_psw;                    /* 0x1c0 */
    PSW program_new_psw;                /* 0x1d0 */
    PSW mcck_new_psw;                   /* 0x1e0 */
    PSW io_new_psw;                     /* 0x1f0 */
    PSW return_psw;                     /* 0x200 */
    uint8_t irb[64];                    /* 0x210 */
    uint64_t sync_enter_timer;          /* 0x250 */
    uint64_t async_enter_timer;         /* 0x258 */
    uint64_t exit_timer;                /* 0x260 */
    uint64_t last_update_timer;         /* 0x268 */
    uint64_t user_timer;                /* 0x270 */
    uint64_t system_timer;              /* 0x278 */
    uint64_t last_update_clock;         /* 0x280 */
    uint64_t steal_clock;               /* 0x288 */
    PSW return_mcck_psw;                /* 0x290 */
    uint8_t pad9[0xc00-0x2a0];          /* 0x2a0 */
    /* System info area */
    uint64_t save_area[16];             /* 0xc00 */
    uint8_t pad10[0xd40-0xc80];         /* 0xc80 */
    uint64_t kernel_stack;              /* 0xd40 */
    uint64_t thread_info;               /* 0xd48 */
    uint64_t async_stack;               /* 0xd50 */
    uint64_t kernel_asce;               /* 0xd58 */
    uint64_t user_asce;                 /* 0xd60 */
    uint64_t panic_stack;               /* 0xd68 */
    uint64_t user_exec_asce;            /* 0xd70 */
    uint8_t pad11[0xdc0-0xd78];         /* 0xd78 */

    /* SMP info area: defined by DJB */
    uint64_t clock_comparator;          /* 0xdc0 */
    uint64_t ext_call_fast;             /* 0xdc8 */
    uint64_t percpu_offset;             /* 0xdd0 */
    uint64_t current_task;              /* 0xdd8 */
    uint32_t softirq_pending;           /* 0xde0 */
    uint32_t pad_0x0de4;                /* 0xde4 */
    uint64_t int_clock;                 /* 0xde8 */
    uint8_t pad12[0xe00-0xdf0];         /* 0xdf0 */

    /* 0xe00 is used as indicator for dump tools */
    /* whether the kernel died with panic() or not */
    uint32_t panic_magic;               /* 0xe00 */

    uint8_t pad13[0x11b8-0xe04];        /* 0xe04 */

    /* 64 bit extparam used for pfault, diag 250 etc */
    uint64_t ext_params2;               /* 0x11B8 */

    uint8_t pad14[0x1200-0x11C0];       /* 0x11C0 */

    /* System info area */

    uint64_t floating_pt_save_area[16]; /* 0x1200 */
    uint64_t gpregs_save_area[16];      /* 0x1280 */
    uint32_t st_status_fixed_logout[4]; /* 0x1300 */
    uint8_t pad15[0x1318-0x1310];       /* 0x1310 */
    uint32_t prefixreg_save_area;       /* 0x1318 */
    uint32_t fpt_creg_save_area;        /* 0x131c */
    uint8_t pad16[0x1324-0x1320];       /* 0x1320 */
    uint32_t tod_progreg_save_area;     /* 0x1324 */
    uint32_t cpu_timer_save_area[2];    /* 0x1328 */
    uint32_t clock_comp_save_area[2];   /* 0x1330 */
    uint8_t pad17[0x1340-0x1338];       /* 0x1338 */
    uint32_t access_regs_save_area[16]; /* 0x1340 */
    uint64_t cregs_save_area[16];       /* 0x1380 */

    /* align to the top of the prefix area */

    uint8_t pad18[0x2000-0x1400];       /* 0x1400 */
} QEMU_PACKED LowCore;

LowCore *cpu_map_lowcore(CPUS390XState *env);
void cpu_unmap_lowcore(LowCore *lowcore);

#endif

/* STSI */
#define STSI_LEVEL_MASK 0x00000000f0000000ULL
#define STSI_LEVEL_CURRENT 0x0000000000000000ULL
#define STSI_LEVEL_1 0x0000000010000000ULL
#define STSI_LEVEL_2 0x0000000020000000ULL
#define STSI_LEVEL_3 0x0000000030000000ULL
#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
#define STSI_R1_SEL2_MASK 0x000000000000ffffULL

/* Basic Machine Configuration */
struct sysib_111 {
    uint32_t res1[8];
    uint8_t manuf[16];
    uint8_t type[4];
    uint8_t res2[12];
    uint8_t model[16];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint8_t res3[156];
};

/* Basic Machine CPU */
struct sysib_121 {
    uint32_t res1[80];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint8_t res2[2];
    uint16_t cpu_addr;
    uint8_t res3[152];
};

/* Basic Machine CPUs */
struct sysib_122 {
    uint8_t res1[32];
    uint32_t capability;
    uint16_t total_cpus;
    uint16_t active_cpus;
    uint16_t standby_cpus;
    uint16_t reserved_cpus;
    uint16_t adjustments[2026];
};

/* LPAR CPU */
struct sysib_221 {
    uint32_t res1[80];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint16_t cpu_id;
    uint16_t cpu_addr;
    uint8_t res3[152];
};

/* LPAR CPUs */
struct sysib_222 {
    uint32_t res1[32];
    uint16_t lpar_num;
    uint8_t res2;
    uint8_t lcpuc;
    uint16_t total_cpus;
    uint16_t conf_cpus;
    uint16_t standby_cpus;
    uint16_t reserved_cpus;
    uint8_t name[8];
    uint32_t caf;
    uint8_t res3[16];
    uint16_t dedicated_cpus;
    uint16_t shared_cpus;
    uint8_t res4[180];
};

/* VM CPUs */
struct sysib_322 {
    uint8_t res1[31];
    uint8_t count;
    struct {
        uint8_t res2[4];
        uint16_t total_cpus;
        uint16_t conf_cpus;
        uint16_t standby_cpus;
        uint16_t reserved_cpus;
        uint8_t name[8];
        uint32_t caf;
        uint8_t cpi[16];
        uint8_t res5[3];
        uint8_t ext_name_encoding;
        uint32_t res3;
        uint8_t uuid[16];
    } vm[8];
    uint8_t res4[1504];
    uint8_t ext_names[8][256];
};

/* MMU defines */
#define _ASCE_ORIGIN            ~0xfffULL /* segment table origin */
#define _ASCE_SUBSPACE          0x200     /* subspace group control */
#define _ASCE_PRIVATE_SPACE     0x100     /* private space control */
#define _ASCE_ALT_EVENT         0x80      /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40      /* space switch event */
#define _ASCE_REAL_SPACE        0x20      /* real space control */
#define _ASCE_TYPE_MASK         0x0c      /* asce table type mask */
#define _ASCE_TYPE_REGION1      0x0c      /* region first table type */
#define _ASCE_TYPE_REGION2      0x08      /* region second table type */
#define _ASCE_TYPE_REGION3      0x04      /* region third table type */
#define _ASCE_TYPE_SEGMENT      0x00      /* segment table type */
#define _ASCE_TABLE_LENGTH      0x03      /* region table length */

#define _REGION_ENTRY_ORIGIN    ~0xfffULL /* region/segment table origin */
#define _REGION_ENTRY_RO        0x200     /* region/segment protection bit */
#define _REGION_ENTRY_TF        0xc0      /* region/segment table offset */
#define _REGION_ENTRY_INV       0x20      /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c      /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1   0x0c      /* region first table type */
#define _REGION_ENTRY_TYPE_R2   0x08      /* region second table type */
#define _REGION_ENTRY_TYPE_R3   0x04      /* region third table type */
#define _REGION_ENTRY_LENGTH    0x03      /* region third length */

#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffULL /* segment table origin */
#define _SEGMENT_ENTRY_FC       0x400     /* format control */
#define _SEGMENT_ENTRY_RO       0x200     /* page protection bit */
#define _SEGMENT_ENTRY_INV      0x20      /* invalid segment table entry */

#define VADDR_PX                0xff000   /* page index bits */

#define _PAGE_RO                0x200     /* HW read-only bit */
#define _PAGE_INVALID           0x400     /* HW invalid bit */
#define _PAGE_RES0              0x800     /* bit must be zero */

#define SK_C                    (0x1 << 1)
#define SK_R                    (0x1 << 2)
#define SK_F                    (0x1 << 3)
#define SK_ACC_MASK             (0xf << 4)

/* SIGP order codes */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12
#define SIGP_STORE_ADTL_STATUS 0x17

/* SIGP condition codes */
#define SIGP_CC_ORDER_CODE_ACCEPTED 0
#define SIGP_CC_STATUS_STORED       1
#define SIGP_CC_BUSY                2
#define SIGP_CC_NOT_OPERATIONAL     3

/* SIGP status bits */
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL

/* SIGP SET ARCHITECTURE modes */
#define SIGP_MODE_ESA_S390 0
#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2

/* SIGP order code mask corresponding to bit positions 56-63 */
#define SIGP_ORDER_MASK 0x000000ff

void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
uint64_t get_psw_mask(CPUS390XState *env);
target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags, bool exc);
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr);
void s390_cpu_recompute_watchpoints(CPUState *cs);

int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write);

#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len)    \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len)   \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len)   \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
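
/*
 * Illustrative sketch, not part of the original header: the convenience
 * macros above wrap s390_cpu_virt_mem_rw() for the common read/write
 * cases, e.g. fetching a guest doubleword from a logical address (this
 * sketch assumes the underlying call returns 0 on success and leaves the
 * guest's byte order untouched).
 */
static inline int example_read_guest_u64(S390CPU *cpu, vaddr laddr,
                                         uint8_t ar, uint64_t *val)
{
    /* hypothetical helper built on the macros above */
    return s390_cpu_virt_mem_read(cpu, laddr, ar, val, sizeof(*val));
}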
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

/* Converts ns to s390's clock format */
static inline uint64_t time2tod(uint64_t ns) {
    return (ns << 9) / 125;
}

/* Converts s390's clock format to ns */
static inline uint64_t tod2time(uint64_t t) {
    return (t * 125) >> 9;
}
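
/*
 * Illustrative sketch, not part of the original header: one TOD clock unit
 * is 1000/4096 ns (bit 51 of the TOD ticks once per microsecond), which is
 * what the shift by 9 and the factor 125 above implement, since
 * 125/512 == 1000/4096. For example, one second is 4096000000 TOD units.
 */
static inline bool example_tod_conversion_roundtrips(void)
{
    return time2tod(1000000000ULL) == 4096000000ULL &&
           tod2time(4096000000ULL) == 1000000000ULL;
}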
/* from s390-virtio-ccw */
#define MEM_SECTION_SIZE 0x10000000UL
#define MAX_AVAIL_SLOTS 32

/* fpu_helper.c */
uint32_t set_cc_nz_f32(float32 v);
uint32_t set_cc_nz_f64(float64 v);
uint32_t set_cc_nz_f128(float128 v);

/* misc_helper.c */
#ifndef CONFIG_USER_ONLY
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
#endif
/* automatically detect the instruction length */
#define ILEN_AUTO 0xff
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr);

#ifdef CONFIG_KVM
void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code);
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word);
void kvm_s390_crw_mchk(void);
void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign);
int kvm_s390_cpu_restart(S390CPU *cpu);
int kvm_s390_get_memslot_count(void);
int kvm_s390_cmma_active(void);
void kvm_s390_cmma_reset(void);
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
void kvm_s390_reset_vcpu(S390CPU *cpu);
int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit);
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
int kvm_s390_get_ri(void);
int kvm_s390_get_gs(void);
void kvm_s390_crypto_reset(void);
#else
static inline void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
}
static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
                                         uint16_t subchannel_nr,
                                         uint32_t io_int_parm,
                                         uint32_t io_int_word)
{
}
static inline void kvm_s390_crw_mchk(void)
{
}
static inline void kvm_s390_enable_css_support(S390CPU *cpu)
{
}
static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                                  uint32_t sch, int vq,
                                                  bool assign)
{
    return -ENOSYS;
}
static inline int kvm_s390_cpu_restart(S390CPU *cpu)
{
    return -ENOSYS;
}
static inline void kvm_s390_cmma_reset(void)
{
}
static inline int kvm_s390_get_memslot_count(void)
{
    return MAX_AVAIL_SLOTS;
}
static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    return -ENOSYS;
}
static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
{
}
static inline int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    return 0;
}
static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
}
static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    return 0;
}
static inline int kvm_s390_get_ri(void)
{
    return 0;
}
static inline int kvm_s390_get_gs(void)
{
    return 0;
}
static inline void kvm_s390_crypto_reset(void)
{
}
#endif

static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    if (kvm_enabled()) {
        return kvm_s390_set_mem_limit(new_limit, hw_limit);
    }
    return 0;
}
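
/*
 * Illustrative sketch, not part of the original header: callers are
 * expected to go through s390_set_memory_limit() above, which forwards to
 * kvm_s390_set_mem_limit() only when KVM is enabled; in this sketch's
 * reading, the effective hardware limit is reported back via *hw_limit.
 */
static inline int example_apply_memory_limit(uint64_t ram_size)
{
    uint64_t hw_limit = 0; /* assumed to receive the limit actually applied */

    return s390_set_memory_limit(ram_size, &hw_limit);
}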
static inline void s390_cmma_reset(void)
{
    if (kvm_enabled()) {
        kvm_s390_cmma_reset();
    }
}

static inline int s390_cpu_restart(S390CPU *cpu)
{
    if (kvm_enabled()) {
        return kvm_s390_cpu_restart(cpu);
    }
    return -ENOSYS;
}

static inline int s390_get_memslot_count(void)
{
    if (kvm_enabled()) {
        return kvm_s390_get_memslot_count();
    } else {
        return MAX_AVAIL_SLOTS;
    }
}

void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
                       uint32_t io_int_parm, uint32_t io_int_word);
void s390_crw_mchk(void);

static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                              uint32_t sch_id, int vq,
                                              bool assign)
{
    if (kvm_enabled()) {
        return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
    } else {
        return 0;
    }
}

static inline void s390_crypto_reset(void)
{
    if (kvm_enabled()) {
        kvm_s390_crypto_reset();
    }
}

static inline bool s390_get_squash_mcss(void)
{
    if (object_property_get_bool(OBJECT(qdev_get_machine()), "s390-squash-mcss",
                                 NULL)) {
        return true;
    }

    return false;
}

/* machine check interruption code */

/* subclasses */
#define MCIC_SC_SD 0x8000000000000000ULL
#define MCIC_SC_PD 0x4000000000000000ULL
#define MCIC_SC_SR 0x2000000000000000ULL
#define MCIC_SC_CD 0x0800000000000000ULL
#define MCIC_SC_ED 0x0400000000000000ULL
#define MCIC_SC_DG 0x0100000000000000ULL
#define MCIC_SC_W 0x0080000000000000ULL
#define MCIC_SC_CP 0x0040000000000000ULL
#define MCIC_SC_SP 0x0020000000000000ULL
#define MCIC_SC_CK 0x0010000000000000ULL

/* subclass modifiers */
#define MCIC_SCM_B 0x0002000000000000ULL
#define MCIC_SCM_DA 0x0000000020000000ULL
#define MCIC_SCM_AP 0x0000000000080000ULL

/* storage errors */
#define MCIC_SE_SE 0x0000800000000000ULL
#define MCIC_SE_SC 0x0000400000000000ULL
#define MCIC_SE_KE 0x0000200000000000ULL
#define MCIC_SE_DS 0x0000100000000000ULL
#define MCIC_SE_IE 0x0000000080000000ULL

/* validity bits */
#define MCIC_VB_WP 0x0000080000000000ULL
#define MCIC_VB_MS 0x0000040000000000ULL
#define MCIC_VB_PM 0x0000020000000000ULL
#define MCIC_VB_IA 0x0000010000000000ULL
#define MCIC_VB_FA 0x0000008000000000ULL
#define MCIC_VB_VR 0x0000004000000000ULL
#define MCIC_VB_EC 0x0000002000000000ULL
#define MCIC_VB_FP 0x0000001000000000ULL
#define MCIC_VB_GR 0x0000000800000000ULL
#define MCIC_VB_CR 0x0000000400000000ULL
#define MCIC_VB_ST 0x0000000100000000ULL
#define MCIC_VB_AR 0x0000000040000000ULL
#define MCIC_VB_GS 0x0000000008000000ULL
#define MCIC_VB_PR 0x0000000000200000ULL
#define MCIC_VB_FC 0x0000000000100000ULL
#define MCIC_VB_CT 0x0000000000020000ULL
#define MCIC_VB_CC 0x0000000000010000ULL

#endif