/*
 * S/390 virtual CPU header
 *
 * Copyright (c) 2009 Ulrich Hecht
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CPU_S390X_H
#define CPU_S390X_H
#include "config.h"
#include "qemu-common.h"

#define TARGET_LONG_BITS 64

#define ELF_MACHINE EM_S390
#define ELF_MACHINE_UNAME "S390X"

#define CPUArchState struct CPUS390XState

#include "exec/cpu-defs.h"
#define TARGET_PAGE_BITS 12

#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64

#include "exec/cpu-all.h"

#include "fpu/softfloat.h"

#define NB_MMU_MODES 3

#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
#define MMU_MODE2_SUFFIX _home

#define MMU_USER_IDX 0

#define MAX_EXT_QUEUE 16
#define MAX_IO_QUEUE 16
#define MAX_MCHK_QUEUE 16

#define PSW_MCHK_MASK 0x0004000000000000
#define PSW_IO_MASK 0x0200000000000000
typedef struct PSW {
    uint64_t mask;
    uint64_t addr;
} PSW;

typedef struct ExtQueue {
    uint32_t code;
    uint32_t param;
    uint32_t param64;
} ExtQueue;

typedef struct IOIntQueue {
    uint16_t id;
    uint16_t nr;
    uint32_t parm;
    uint32_t word;
} IOIntQueue;

typedef struct MchkQueue {
    uint16_t type;
} MchkQueue;
typedef struct CPUS390XState {
    uint64_t regs[16];     /* GP registers */
    /*
     * The floating point registers are part of the vector registers.
     * vregs[0][0] -> vregs[15][0] are 16 floating point registers
     */
    CPU_DoubleU vregs[32][2];  /* vector registers */
    uint32_t aregs[16];    /* access registers */

    uint32_t fpc;          /* floating-point control register */
    uint32_t cc_op;

    float_status fpu_status; /* passed to softfloat lib */

    /* The low part of a 128-bit return, or remainder of a divide. */
    uint64_t retxl;

    PSW psw;

    uint64_t cc_src;
    uint64_t cc_dst;
    uint64_t cc_vr;

    uint64_t __excp_addr;
    uint64_t psa;

    uint32_t int_pgm_code;
    uint32_t int_pgm_ilen;

    uint32_t int_svc_code;
    uint32_t int_svc_ilen;

    uint64_t per_address;
    uint16_t per_perc_atmid;

    uint64_t cregs[16]; /* control registers */

    ExtQueue ext_queue[MAX_EXT_QUEUE];
    IOIntQueue io_queue[MAX_IO_QUEUE][8];
    MchkQueue mchk_queue[MAX_MCHK_QUEUE];

    int pending_int;
    int ext_index;
    int io_index[8];
    int mchk_index;

    uint64_t ckc;
    uint64_t cputm;
    uint32_t todpr;

    uint64_t pfault_token;
    uint64_t pfault_compare;
    uint64_t pfault_select;

    uint64_t gbea;
    uint64_t pp;

    CPU_COMMON

    /* reset does memset(0) up to here */

    uint32_t cpu_num;
    uint32_t machine_type;

    uint8_t *storage_keys;

    uint64_t tod_offset;
    uint64_t tod_basetime;
    QEMUTimer *tod_timer;

    QEMUTimer *cpu_timer;

    /*
     * The cpu state represents the logical state of a cpu. In contrast to other
     * architectures, there is a difference between a halt and a stop on s390.
     * If all cpus are either stopped (including check stop) or in the disabled
     * wait state, the vm can be shut down.
     */
#define CPU_STATE_UNINITIALIZED 0x00
#define CPU_STATE_STOPPED       0x01
#define CPU_STATE_CHECK_STOP    0x02
#define CPU_STATE_OPERATING     0x03
#define CPU_STATE_LOAD          0x04
    uint8_t cpu_state;

    /* currently processed sigp order */
    uint8_t sigp_order;

} CPUS390XState;

static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
{
    return &cs->vregs[nr][0];
}
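
/*
 * For illustration (not part of the original header): the 16 floating
 * point registers overlay doubleword 0 of vector registers 0-15, so
 * get_freg(env, 2) points at vregs[2][0], i.e. at FPR f2.
 */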
#include "cpu-qom.h"
#include "sysemu/kvm.h"
/* distinguish between 24 bit and 31 bit addressing */
#define HIGH_ORDER_BIT 0x80000000

/* Interrupt Codes */
/* Program Interrupts */
#define PGM_OPERATION 0x0001
#define PGM_PRIVILEGED 0x0002
#define PGM_EXECUTE 0x0003
#define PGM_PROTECTION 0x0004
#define PGM_ADDRESSING 0x0005
#define PGM_SPECIFICATION 0x0006
#define PGM_DATA 0x0007
#define PGM_FIXPT_OVERFLOW 0x0008
#define PGM_FIXPT_DIVIDE 0x0009
#define PGM_DEC_OVERFLOW 0x000a
#define PGM_DEC_DIVIDE 0x000b
#define PGM_HFP_EXP_OVERFLOW 0x000c
#define PGM_HFP_EXP_UNDERFLOW 0x000d
#define PGM_HFP_SIGNIFICANCE 0x000e
#define PGM_HFP_DIVIDE 0x000f
#define PGM_SEGMENT_TRANS 0x0010
#define PGM_PAGE_TRANS 0x0011
#define PGM_TRANS_SPEC 0x0012
#define PGM_SPECIAL_OP 0x0013
#define PGM_OPERAND 0x0015
#define PGM_TRACE_TABLE 0x0016
#define PGM_SPACE_SWITCH 0x001c
#define PGM_HFP_SQRT 0x001d
#define PGM_PC_TRANS_SPEC 0x001f
#define PGM_AFX_TRANS 0x0020
#define PGM_ASX_TRANS 0x0021
#define PGM_LX_TRANS 0x0022
#define PGM_EX_TRANS 0x0023
#define PGM_PRIM_AUTH 0x0024
#define PGM_SEC_AUTH 0x0025
#define PGM_ALET_SPEC 0x0028
#define PGM_ALEN_SPEC 0x0029
#define PGM_ALE_SEQ 0x002a
#define PGM_ASTE_VALID 0x002b
#define PGM_ASTE_SEQ 0x002c
#define PGM_EXT_AUTH 0x002d
#define PGM_STACK_FULL 0x0030
#define PGM_STACK_EMPTY 0x0031
#define PGM_STACK_SPEC 0x0032
#define PGM_STACK_TYPE 0x0033
#define PGM_STACK_OP 0x0034
#define PGM_ASCE_TYPE 0x0038
#define PGM_REG_FIRST_TRANS 0x0039
#define PGM_REG_SEC_TRANS 0x003a
#define PGM_REG_THIRD_TRANS 0x003b
#define PGM_MONITOR 0x0040
#define PGM_PER 0x0080
#define PGM_CRYPTO 0x0119

/* External Interrupts */
#define EXT_INTERRUPT_KEY 0x0040
#define EXT_CLOCK_COMP 0x1004
#define EXT_CPU_TIMER 0x1005
#define EXT_MALFUNCTION 0x1200
#define EXT_EMERGENCY 0x1201
#define EXT_EXTERNAL_CALL 0x1202
#define EXT_ETR 0x1406
#define EXT_SERVICE 0x2401
#define EXT_VIRTIO 0x2603
/* PSW defines */
#undef PSW_MASK_PER
#undef PSW_MASK_DAT
#undef PSW_MASK_IO
#undef PSW_MASK_EXT
#undef PSW_MASK_KEY
#undef PSW_SHIFT_KEY
#undef PSW_MASK_MCHECK
#undef PSW_MASK_WAIT
#undef PSW_MASK_PSTATE
#undef PSW_MASK_ASC
#undef PSW_MASK_CC
#undef PSW_MASK_PM
#undef PSW_MASK_64
#undef PSW_MASK_32
#undef PSW_MASK_ESA_ADDR

#define PSW_MASK_PER 0x4000000000000000ULL
#define PSW_MASK_DAT 0x0400000000000000ULL
#define PSW_MASK_IO 0x0200000000000000ULL
#define PSW_MASK_EXT 0x0100000000000000ULL
#define PSW_MASK_KEY 0x00F0000000000000ULL
#define PSW_SHIFT_KEY 56
#define PSW_MASK_MCHECK 0x0004000000000000ULL
#define PSW_MASK_WAIT 0x0002000000000000ULL
#define PSW_MASK_PSTATE 0x0001000000000000ULL
#define PSW_MASK_ASC 0x0000C00000000000ULL
#define PSW_MASK_CC 0x0000300000000000ULL
#define PSW_MASK_PM 0x00000F0000000000ULL
#define PSW_MASK_64 0x0000000100000000ULL
#define PSW_MASK_32 0x0000000080000000ULL
#define PSW_MASK_ESA_ADDR 0x000000007fffffffULL

#undef PSW_ASC_PRIMARY
#undef PSW_ASC_ACCREG
#undef PSW_ASC_SECONDARY
#undef PSW_ASC_HOME

#define PSW_ASC_PRIMARY 0x0000000000000000ULL
#define PSW_ASC_ACCREG 0x0000400000000000ULL
#define PSW_ASC_SECONDARY 0x0000800000000000ULL
#define PSW_ASC_HOME 0x0000C00000000000ULL

/* tb flags */

#define FLAG_MASK_PER (PSW_MASK_PER >> 32)
#define FLAG_MASK_DAT (PSW_MASK_DAT >> 32)
#define FLAG_MASK_IO (PSW_MASK_IO >> 32)
#define FLAG_MASK_EXT (PSW_MASK_EXT >> 32)
#define FLAG_MASK_KEY (PSW_MASK_KEY >> 32)
#define FLAG_MASK_MCHECK (PSW_MASK_MCHECK >> 32)
#define FLAG_MASK_WAIT (PSW_MASK_WAIT >> 32)
#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> 32)
#define FLAG_MASK_ASC (PSW_MASK_ASC >> 32)
#define FLAG_MASK_CC (PSW_MASK_CC >> 32)
#define FLAG_MASK_PM (PSW_MASK_PM >> 32)
#define FLAG_MASK_64 (PSW_MASK_64 >> 32)
#define FLAG_MASK_32 0x00001000

/* Control register 0 bits */
#define CR0_LOWPROT 0x0000000010000000ULL
#define CR0_EDAT 0x0000000000800000ULL

/* MMU */
#define MMU_PRIMARY_IDX 0
#define MMU_SECONDARY_IDX 1
#define MMU_HOME_IDX 2
static inline int cpu_mmu_index(CPUS390XState *env)
{
    switch (env->psw.mask & PSW_MASK_ASC) {
    case PSW_ASC_PRIMARY:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME:
        return MMU_HOME_IDX;
    case PSW_ASC_ACCREG:
        /* Fallthrough: access register mode is not yet supported */
    default:
        abort();
    }
}
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
static inline void cpu_get_tb_cpu_state(CPUS390XState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *pc = env->psw.addr;
    *cs_base = 0;
    *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
             ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
}
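
/*
 * Illustrative example (not part of the original header): with a PSW mask
 * of 0x0704000180000000ULL (DAT, I/O, external and machine-check
 * interruptions enabled, 64-bit addressing), the tb flags become the high
 * word of the mask with the condition code cleared, plus the relocated
 * basic-addressing bit:
 *
 *     (0x07040001 & ~FLAG_MASK_CC) | FLAG_MASK_32  ==  0x07041001
 */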
/* While the PoO talks about ILC (a number between 1-3), what is actually
   stored in LowCore is shifted left one bit (an even number between 2-6).
   As this is the actual length of the insn and therefore more useful, that
   is what we want to pass around and manipulate.  To make sure that we
   have applied this distinction universally, rename the "ILC" to "ILEN". */
static inline int get_ilen(uint8_t opc)
{
    switch (opc >> 6) {
    case 0:
        return 2;
    case 1:
    case 2:
        return 4;
    default:
        return 6;
    }
}
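
/*
 * Illustrative example (not part of the original header): the two
 * high-order bits of the first opcode byte encode the instruction length,
 * so get_ilen(0x07) (e.g. BCR) yields 2, get_ilen(0xb2) (most S-format
 * instructions) yields 4, and get_ilen(0xeb) (RSY-format instructions)
 * yields 6.
 */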
/* PER bits from control register 9 */
#define PER_CR9_EVENT_BRANCH           0x80000000
#define PER_CR9_EVENT_IFETCH           0x40000000
#define PER_CR9_EVENT_STORE            0x20000000
#define PER_CR9_EVENT_STORE_REAL       0x08000000
#define PER_CR9_EVENT_NULLIFICATION    0x01000000
#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
#define PER_CR9_CONTROL_ALTERATION     0x00200000

/* PER bits from the PER CODE/ATMID/AI in lowcore */
#define PER_CODE_EVENT_BRANCH          0x8000
#define PER_CODE_EVENT_IFETCH          0x4000
#define PER_CODE_EVENT_STORE           0x2000
#define PER_CODE_EVENT_STORE_REAL      0x0800
#define PER_CODE_EVENT_NULLIFICATION   0x0100

/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
   entry when a PER exception is triggered. */
static inline uint8_t get_per_atmid(CPUS390XState *env)
{
    return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
           (1 << 6) |
           ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
           ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
           ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
           ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
}
/* Check if an address is within the PER starting address and the PER
   ending address.  The address range might loop. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}
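
/*
 * Illustrative example (not part of the original header): with a wrapping
 * range such as CR10 = 0xffff0000 and CR11 = 0x00001000, both
 * get_per_in_range(env, 0xffff8000) and get_per_in_range(env, 0x800) are
 * true, while get_per_in_range(env, 0x200000) is false.
 */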
#ifndef CONFIG_USER_ONLY
/* In several cases of runtime exceptions, we haven't recorded the true
   instruction length.  Use these codes when raising exceptions in order
   to re-compute the length by examining the insn in memory. */
#define ILEN_LATER 0x20
#define ILEN_LATER_INC 0x21
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
#endif

S390CPU *cpu_s390x_init(const char *cpu_model);
void s390x_translate_init(void);
int cpu_s390x_exec(CPUS390XState *s);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_s390x_signal_handler(int host_signum, void *pinfo,
                             void *puc);
int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                              int mmu_idx);

#include "ioinst.h"


#ifndef CONFIG_USER_ONLY
void do_restart_interrupt(CPUS390XState *env);
static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
                                       uint8_t *ar)
{
    hwaddr addr = 0;
    uint8_t reg;

    reg = ipb >> 28;
    if (reg > 0) {
        addr = env->regs[reg];
    }
    addr += (ipb >> 16) & 0xfff;
    if (ar) {
        *ar = reg;
    }

    return addr;
}
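
/*
 * Illustrative example (not part of the original header): for an IPB value
 * of 0x12340000 the base register is r1 and the displacement is 0x234, so
 * decode_basedisp_s() returns env->regs[1] + 0x234 and, if an ar pointer
 * is supplied, stores the register number 1 into *ar.
 */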
/* Base/displacement are at the same locations. */
#define decode_basedisp_rs decode_basedisp_s

/* helper functions for run_on_cpu() */
static inline void s390_do_cpu_reset(void *arg)
{
    CPUState *cs = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cs);

    scc->cpu_reset(cs);
}

static inline void s390_do_cpu_full_reset(void *arg)
{
    CPUState *cs = arg;

    cpu_reset(cs);
}
void s390x_tod_timer(void *opaque);
void s390x_cpu_timer(void *opaque);

int s390_virtio_hypercall(CPUS390XState *env);
void s390_virtio_irq(int config_change, uint64_t token);

#ifdef CONFIG_KVM
void kvm_s390_virtio_irq(int config_change, uint64_t token);
void kvm_s390_service_interrupt(uint32_t parm);
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write);
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
#else
static inline void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
}
static inline void kvm_s390_service_interrupt(uint32_t parm)
{
}
static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    return -ENOSYS;
}
static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    return -ENOSYS;
}
static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
                                  void *hostbuf, int len, bool is_write)
{
    return -ENOSYS;
}
static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
                                             uint64_t te_code)
{
}
#endif

static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    if (kvm_enabled()) {
        return kvm_s390_get_clock(tod_high, tod_low);
    }
    /* Fixme TCG */
    *tod_high = 0;
    *tod_low = 0;
    return 0;
}

static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    if (kvm_enabled()) {
        return kvm_s390_set_clock(tod_high, tod_low);
    }
    /* Fixme TCG */
    return 0;
}
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
unsigned int s390_cpu_halt(S390CPU *cpu);
void s390_cpu_unhalt(S390CPU *cpu);
unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
{
    return cpu->env.cpu_state;
}

void gtod_save(QEMUFile *f, void *opaque);
int gtod_load(QEMUFile *f, void *opaque, int version_id);

/* service interrupts are floating, therefore we must not pass a cpustate */
void s390_sclp_extint(uint32_t parm);

/* from s390-virtio-bus */
extern const hwaddr virtio_size;

#else
static inline unsigned int s390_cpu_halt(S390CPU *cpu)
{
    return 0;
}

static inline void s390_cpu_unhalt(S390CPU *cpu)
{
}

static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
{
    return 0;
}
#endif
void cpu_lock(void);
void cpu_unlock(void);
typedef struct SubchDev SubchDev;

#ifndef CONFIG_USER_ONLY
extern void io_subsystem_reset(void);
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
                         uint16_t schid);
bool css_subch_visible(SubchDev *sch);
void css_conditional_io_interrupt(SubchDev *sch);
int css_do_stsch(SubchDev *sch, SCHIB *schib);
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
int css_do_msch(SubchDev *sch, const SCHIB *schib);
int css_do_xsch(SubchDev *sch);
int css_do_csch(SubchDev *sch);
int css_do_hsch(SubchDev *sch);
int css_do_ssch(SubchDev *sch, ORB *orb);
int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
void css_undo_stcrw(CRW *crw);
int css_do_tpi(IOIntCode *int_code, int lowcore);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
                         int rfmt, void *buf);
void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
int css_enable_mcsse(void);
int css_enable_mss(void);
int css_do_rsch(SubchDev *sch);
int css_do_rchp(uint8_t cssid, uint8_t chpid);
bool css_present(uint8_t cssid);
#endif

#define cpu_init(model) CPU(cpu_s390x_init(model))
#define cpu_exec cpu_s390x_exec
#define cpu_gen_code cpu_s390x_gen_code
#define cpu_signal_handler cpu_s390x_signal_handler

void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list s390_cpu_list

#include "exec/exec-all.h"
#define EXCP_EXT 1 /* external interrupt */
#define EXCP_SVC 2 /* supervisor call (syscall) */
#define EXCP_PGM 3 /* program interruption */
#define EXCP_IO 7 /* I/O interrupt */
#define EXCP_MCHK 8 /* machine check */

#define INTERRUPT_EXT        (1 << 0)
#define INTERRUPT_TOD        (1 << 1)
#define INTERRUPT_CPUTIMER   (1 << 2)
#define INTERRUPT_IO         (1 << 3)
#define INTERRUPT_MCHK       (1 << 4)

/* Program Status Word. */
#define S390_PSWM_REGNUM 0
#define S390_PSWA_REGNUM 1
/* General Purpose Registers. */
#define S390_R0_REGNUM 2
#define S390_R1_REGNUM 3
#define S390_R2_REGNUM 4
#define S390_R3_REGNUM 5
#define S390_R4_REGNUM 6
#define S390_R5_REGNUM 7
#define S390_R6_REGNUM 8
#define S390_R7_REGNUM 9
#define S390_R8_REGNUM 10
#define S390_R9_REGNUM 11
#define S390_R10_REGNUM 12
#define S390_R11_REGNUM 13
#define S390_R12_REGNUM 14
#define S390_R13_REGNUM 15
#define S390_R14_REGNUM 16
#define S390_R15_REGNUM 17
/* Total Core Registers. */
#define S390_NUM_CORE_REGS 18
/* CC optimization */

enum cc_op {
    CC_OP_CONST0 = 0,           /* CC is 0 */
    CC_OP_CONST1,               /* CC is 1 */
    CC_OP_CONST2,               /* CC is 2 */
    CC_OP_CONST3,               /* CC is 3 */

    CC_OP_DYNAMIC,              /* CC calculation defined by env->cc_op */
    CC_OP_STATIC,               /* CC value is env->cc_op */

    CC_OP_NZ,                   /* env->cc_dst != 0 */
    CC_OP_LTGT_32,              /* signed less/greater than (32bit) */
    CC_OP_LTGT_64,              /* signed less/greater than (64bit) */
    CC_OP_LTUGTU_32,            /* unsigned less/greater than (32bit) */
    CC_OP_LTUGTU_64,            /* unsigned less/greater than (64bit) */
    CC_OP_LTGT0_32,             /* signed less/greater than 0 (32bit) */
    CC_OP_LTGT0_64,             /* signed less/greater than 0 (64bit) */

    CC_OP_ADD_64,               /* overflow on add (64bit) */
    CC_OP_ADDU_64,              /* overflow on unsigned add (64bit) */
    CC_OP_ADDC_64,              /* overflow on unsigned add-carry (64bit) */
    CC_OP_SUB_64,               /* overflow on subtraction (64bit) */
    CC_OP_SUBU_64,              /* overflow on unsigned subtraction (64bit) */
    CC_OP_SUBB_64,              /* overflow on unsigned sub-borrow (64bit) */
    CC_OP_ABS_64,               /* sign eval on abs (64bit) */
    CC_OP_NABS_64,              /* sign eval on nabs (64bit) */

    CC_OP_ADD_32,               /* overflow on add (32bit) */
    CC_OP_ADDU_32,              /* overflow on unsigned add (32bit) */
    CC_OP_ADDC_32,              /* overflow on unsigned add-carry (32bit) */
    CC_OP_SUB_32,               /* overflow on subtraction (32bit) */
    CC_OP_SUBU_32,              /* overflow on unsigned subtraction (32bit) */
    CC_OP_SUBB_32,              /* overflow on unsigned sub-borrow (32bit) */
    CC_OP_ABS_32,               /* sign eval on abs (32bit) */
    CC_OP_NABS_32,              /* sign eval on nabs (32bit) */

    CC_OP_COMP_32,              /* complement */
    CC_OP_COMP_64,              /* complement */

    CC_OP_TM_32,                /* test under mask (32bit) */
    CC_OP_TM_64,                /* test under mask (64bit) */

    CC_OP_NZ_F32,               /* FP dst != 0 (32bit) */
    CC_OP_NZ_F64,               /* FP dst != 0 (64bit) */
    CC_OP_NZ_F128,              /* FP dst != 0 (128bit) */

    CC_OP_ICM,                  /* insert characters under mask */
    CC_OP_SLA_32,               /* Calculate shift left signed (32bit) */
    CC_OP_SLA_64,               /* Calculate shift left signed (64bit) */
    CC_OP_FLOGR,                /* find leftmost one */
    CC_OP_MAX
};
static const char *cc_names[] = {
    [CC_OP_CONST0]    = "CC_OP_CONST0",
    [CC_OP_CONST1]    = "CC_OP_CONST1",
    [CC_OP_CONST2]    = "CC_OP_CONST2",
    [CC_OP_CONST3]    = "CC_OP_CONST3",
    [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
    [CC_OP_STATIC]    = "CC_OP_STATIC",
    [CC_OP_NZ]        = "CC_OP_NZ",
    [CC_OP_LTGT_32]   = "CC_OP_LTGT_32",
    [CC_OP_LTGT_64]   = "CC_OP_LTGT_64",
    [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
    [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
    [CC_OP_LTGT0_32]  = "CC_OP_LTGT0_32",
    [CC_OP_LTGT0_64]  = "CC_OP_LTGT0_64",
    [CC_OP_ADD_64]    = "CC_OP_ADD_64",
    [CC_OP_ADDU_64]   = "CC_OP_ADDU_64",
    [CC_OP_ADDC_64]   = "CC_OP_ADDC_64",
    [CC_OP_SUB_64]    = "CC_OP_SUB_64",
    [CC_OP_SUBU_64]   = "CC_OP_SUBU_64",
    [CC_OP_SUBB_64]   = "CC_OP_SUBB_64",
    [CC_OP_ABS_64]    = "CC_OP_ABS_64",
    [CC_OP_NABS_64]   = "CC_OP_NABS_64",
    [CC_OP_ADD_32]    = "CC_OP_ADD_32",
    [CC_OP_ADDU_32]   = "CC_OP_ADDU_32",
    [CC_OP_ADDC_32]   = "CC_OP_ADDC_32",
    [CC_OP_SUB_32]    = "CC_OP_SUB_32",
    [CC_OP_SUBU_32]   = "CC_OP_SUBU_32",
    [CC_OP_SUBB_32]   = "CC_OP_SUBB_32",
    [CC_OP_ABS_32]    = "CC_OP_ABS_32",
    [CC_OP_NABS_32]   = "CC_OP_NABS_32",
    [CC_OP_COMP_32]   = "CC_OP_COMP_32",
    [CC_OP_COMP_64]   = "CC_OP_COMP_64",
    [CC_OP_TM_32]     = "CC_OP_TM_32",
    [CC_OP_TM_64]     = "CC_OP_TM_64",
    [CC_OP_NZ_F32]    = "CC_OP_NZ_F32",
    [CC_OP_NZ_F64]    = "CC_OP_NZ_F64",
    [CC_OP_NZ_F128]   = "CC_OP_NZ_F128",
    [CC_OP_ICM]       = "CC_OP_ICM",
    [CC_OP_SLA_32]    = "CC_OP_SLA_32",
    [CC_OP_SLA_64]    = "CC_OP_SLA_64",
    [CC_OP_FLOGR]     = "CC_OP_FLOGR",
};

static inline const char *cc_name(int cc_op)
{
    return cc_names[cc_op];
}
static inline void setcc(S390CPU *cpu, uint64_t cc)
{
    CPUS390XState *env = &cpu->env;

    env->psw.mask &= ~(3ull << 44);
    env->psw.mask |= (cc & 3) << 44;
    env->cc_op = cc;
}
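
/*
 * For illustration (not part of the original header): the condition code is
 * kept in the two mask bits selected by PSW_MASK_CC (3ULL << 44), so
 * setcc(cpu, 2) leaves 0x0000200000000000ULL in that field and records the
 * value statically via cc_op = 2, which equals CC_OP_CONST2.
 */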
typedef struct LowCore
{
    /* prefix area: defined by architecture */
    uint32_t ccw1[2];                   /* 0x000 */
    uint32_t ccw2[4];                   /* 0x008 */
    uint8_t pad1[0x80-0x18];            /* 0x018 */
    uint32_t ext_params;                /* 0x080 */
    uint16_t cpu_addr;                  /* 0x084 */
    uint16_t ext_int_code;              /* 0x086 */
    uint16_t svc_ilen;                  /* 0x088 */
    uint16_t svc_code;                  /* 0x08a */
    uint16_t pgm_ilen;                  /* 0x08c */
    uint16_t pgm_code;                  /* 0x08e */
    uint32_t data_exc_code;             /* 0x090 */
    uint16_t mon_class_num;             /* 0x094 */
    uint16_t per_perc_atmid;            /* 0x096 */
    uint64_t per_address;               /* 0x098 */
    uint8_t exc_access_id;              /* 0x0a0 */
    uint8_t per_access_id;              /* 0x0a1 */
    uint8_t op_access_id;               /* 0x0a2 */
    uint8_t ar_access_id;               /* 0x0a3 */
    uint8_t pad2[0xA8-0xA4];            /* 0x0a4 */
    uint64_t trans_exc_code;            /* 0x0a8 */
    uint64_t monitor_code;              /* 0x0b0 */
    uint16_t subchannel_id;             /* 0x0b8 */
    uint16_t subchannel_nr;             /* 0x0ba */
    uint32_t io_int_parm;               /* 0x0bc */
    uint32_t io_int_word;               /* 0x0c0 */
    uint8_t pad3[0xc8-0xc4];            /* 0x0c4 */
    uint32_t stfl_fac_list;             /* 0x0c8 */
    uint8_t pad4[0xe8-0xcc];            /* 0x0cc */
    uint32_t mcck_interruption_code[2]; /* 0x0e8 */
    uint8_t pad5[0xf4-0xf0];            /* 0x0f0 */
    uint32_t external_damage_code;      /* 0x0f4 */
    uint64_t failing_storage_address;   /* 0x0f8 */
    uint8_t pad6[0x110-0x100];          /* 0x100 */
    uint64_t per_breaking_event_addr;   /* 0x110 */
    uint8_t pad7[0x120-0x118];          /* 0x118 */
    PSW restart_old_psw;                /* 0x120 */
    PSW external_old_psw;               /* 0x130 */
    PSW svc_old_psw;                    /* 0x140 */
    PSW program_old_psw;                /* 0x150 */
    PSW mcck_old_psw;                   /* 0x160 */
    PSW io_old_psw;                     /* 0x170 */
    uint8_t pad8[0x1a0-0x180];          /* 0x180 */
    PSW restart_new_psw;                /* 0x1a0 */
    PSW external_new_psw;               /* 0x1b0 */
    PSW svc_new_psw;                    /* 0x1c0 */
    PSW program_new_psw;                /* 0x1d0 */
    PSW mcck_new_psw;                   /* 0x1e0 */
    PSW io_new_psw;                     /* 0x1f0 */
    PSW return_psw;                     /* 0x200 */
    uint8_t irb[64];                    /* 0x210 */
    uint64_t sync_enter_timer;          /* 0x250 */
    uint64_t async_enter_timer;         /* 0x258 */
    uint64_t exit_timer;                /* 0x260 */
    uint64_t last_update_timer;         /* 0x268 */
    uint64_t user_timer;                /* 0x270 */
    uint64_t system_timer;              /* 0x278 */
    uint64_t last_update_clock;         /* 0x280 */
    uint64_t steal_clock;               /* 0x288 */
    PSW return_mcck_psw;                /* 0x290 */
    uint8_t pad9[0xc00-0x2a0];          /* 0x2a0 */
    /* System info area */
    uint64_t save_area[16];             /* 0xc00 */
    uint8_t pad10[0xd40-0xc80];         /* 0xc80 */
    uint64_t kernel_stack;              /* 0xd40 */
    uint64_t thread_info;               /* 0xd48 */
    uint64_t async_stack;               /* 0xd50 */
    uint64_t kernel_asce;               /* 0xd58 */
    uint64_t user_asce;                 /* 0xd60 */
    uint64_t panic_stack;               /* 0xd68 */
    uint64_t user_exec_asce;            /* 0xd70 */
    uint8_t pad11[0xdc0-0xd78];         /* 0xd78 */

    /* SMP info area: defined by DJB */
    uint64_t clock_comparator;          /* 0xdc0 */
    uint64_t ext_call_fast;             /* 0xdc8 */
    uint64_t percpu_offset;             /* 0xdd0 */
    uint64_t current_task;              /* 0xdd8 */
    uint32_t softirq_pending;           /* 0xde0 */
    uint32_t pad_0x0de4;                /* 0xde4 */
    uint64_t int_clock;                 /* 0xde8 */
    uint8_t pad12[0xe00-0xdf0];         /* 0xdf0 */

    /* 0xe00 is used as indicator for dump tools */
    /* whether the kernel died with panic() or not */
    uint32_t panic_magic;               /* 0xe00 */

    uint8_t pad13[0x11b8-0xe04];        /* 0xe04 */

    /* 64 bit extparam used for pfault, diag 250 etc */
    uint64_t ext_params2;               /* 0x11B8 */

    uint8_t pad14[0x1200-0x11C0];       /* 0x11C0 */

    /* System info area */

    uint64_t floating_pt_save_area[16]; /* 0x1200 */
    uint64_t gpregs_save_area[16];      /* 0x1280 */
    uint32_t st_status_fixed_logout[4]; /* 0x1300 */
    uint8_t pad15[0x1318-0x1310];       /* 0x1310 */
    uint32_t prefixreg_save_area;       /* 0x1318 */
    uint32_t fpt_creg_save_area;        /* 0x131c */
    uint8_t pad16[0x1324-0x1320];       /* 0x1320 */
    uint32_t tod_progreg_save_area;     /* 0x1324 */
    uint32_t cpu_timer_save_area[2];    /* 0x1328 */
    uint32_t clock_comp_save_area[2];   /* 0x1330 */
    uint8_t pad17[0x1340-0x1338];       /* 0x1338 */
    uint32_t access_regs_save_area[16]; /* 0x1340 */
    uint64_t cregs_save_area[16];       /* 0x1380 */

    /* align to the top of the prefix area */

    uint8_t pad18[0x2000-0x1400];       /* 0x1400 */
} QEMU_PACKED LowCore;
/* STSI */
#define STSI_LEVEL_MASK 0x00000000f0000000ULL
#define STSI_LEVEL_CURRENT 0x0000000000000000ULL
#define STSI_LEVEL_1 0x0000000010000000ULL
#define STSI_LEVEL_2 0x0000000020000000ULL
#define STSI_LEVEL_3 0x0000000030000000ULL
#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
#define STSI_R1_SEL2_MASK 0x000000000000ffffULL
/* Basic Machine Configuration */
struct sysib_111 {
    uint32_t res1[8];
    uint8_t manuf[16];
    uint8_t type[4];
    uint8_t res2[12];
    uint8_t model[16];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint8_t res3[156];
};

/* Basic Machine CPU */
struct sysib_121 {
    uint32_t res1[80];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint8_t res2[2];
    uint16_t cpu_addr;
    uint8_t res3[152];
};

/* Basic Machine CPUs */
struct sysib_122 {
    uint8_t res1[32];
    uint32_t capability;
    uint16_t total_cpus;
    uint16_t active_cpus;
    uint16_t standby_cpus;
    uint16_t reserved_cpus;
    uint16_t adjustments[2026];
};

/* LPAR CPU */
struct sysib_221 {
    uint32_t res1[80];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint16_t cpu_id;
    uint16_t cpu_addr;
    uint8_t res3[152];
};

/* LPAR CPUs */
struct sysib_222 {
    uint32_t res1[32];
    uint16_t lpar_num;
    uint8_t res2;
    uint8_t lcpuc;
    uint16_t total_cpus;
    uint16_t conf_cpus;
    uint16_t standby_cpus;
    uint16_t reserved_cpus;
    uint8_t name[8];
    uint32_t caf;
    uint8_t res3[16];
    uint16_t dedicated_cpus;
    uint16_t shared_cpus;
    uint8_t res4[180];
};

/* VM CPUs */
struct sysib_322 {
    uint8_t res1[31];
    uint8_t count;
    struct {
        uint8_t res2[4];
        uint16_t total_cpus;
        uint16_t conf_cpus;
        uint16_t standby_cpus;
        uint16_t reserved_cpus;
        uint8_t name[8];
        uint32_t caf;
        uint8_t cpi[16];
        uint8_t res5[3];
        uint8_t ext_name_encoding;
        uint32_t res3;
        uint8_t uuid[16];
    } vm[8];
    uint8_t res4[1504];
    uint8_t ext_names[8][256];
};
/* MMU defines */
#define _ASCE_ORIGIN            ~0xfffULL /* segment table origin */
#define _ASCE_SUBSPACE          0x200     /* subspace group control */
#define _ASCE_PRIVATE_SPACE     0x100     /* private space control */
#define _ASCE_ALT_EVENT         0x80      /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40      /* space switch event */
#define _ASCE_REAL_SPACE        0x20      /* real space control */
#define _ASCE_TYPE_MASK         0x0c      /* asce table type mask */
#define _ASCE_TYPE_REGION1      0x0c      /* region first table type */
#define _ASCE_TYPE_REGION2      0x08      /* region second table type */
#define _ASCE_TYPE_REGION3      0x04      /* region third table type */
#define _ASCE_TYPE_SEGMENT      0x00      /* segment table type */
#define _ASCE_TABLE_LENGTH      0x03      /* region table length */

#define _REGION_ENTRY_ORIGIN    ~0xfffULL /* region/segment table origin */
#define _REGION_ENTRY_RO        0x200     /* region/segment protection bit */
#define _REGION_ENTRY_TF        0xc0      /* region/segment table offset */
#define _REGION_ENTRY_INV       0x20      /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c      /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1   0x0c      /* region first table type */
#define _REGION_ENTRY_TYPE_R2   0x08      /* region second table type */
#define _REGION_ENTRY_TYPE_R3   0x04      /* region third table type */
#define _REGION_ENTRY_LENGTH    0x03      /* region third length */

#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffULL /* segment table origin */
#define _SEGMENT_ENTRY_FC       0x400     /* format control */
#define _SEGMENT_ENTRY_RO       0x200     /* page protection bit */
#define _SEGMENT_ENTRY_INV      0x20      /* invalid segment table entry */

#define _PAGE_RO        0x200             /* HW read-only bit */
#define _PAGE_INVALID   0x400             /* HW invalid bit */
#define _PAGE_RES0      0x800             /* bit must be zero */

#define SK_C            (0x1 << 1)
#define SK_R            (0x1 << 2)
#define SK_F            (0x1 << 3)
#define SK_ACC_MASK     (0xf << 4)
/* SIGP order codes */
#define SIGP_SENSE             0x01
#define SIGP_EXTERNAL_CALL     0x02
#define SIGP_EMERGENCY         0x03
#define SIGP_START             0x04
#define SIGP_STOP              0x05
#define SIGP_RESTART           0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET         0x0c
#define SIGP_SET_PREFIX        0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH          0x12
#define SIGP_STORE_ADTL_STATUS 0x17

/* SIGP condition codes */
#define SIGP_CC_ORDER_CODE_ACCEPTED 0
#define SIGP_CC_STATUS_STORED       1
#define SIGP_CC_BUSY                2
#define SIGP_CC_NOT_OPERATIONAL     3

/* SIGP status bits */
#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
#define SIGP_STAT_STOPPED           0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
#define SIGP_STAT_CHECK_STOP        0x00000010UL
#define SIGP_STAT_INOPERATIVE       0x00000004UL
#define SIGP_STAT_INVALID_ORDER     0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL

/* SIGP SET ARCHITECTURE modes */
#define SIGP_MODE_ESA_S390             0
#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags, bool exc);
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr);
void s390_cpu_recompute_watchpoints(CPUState *cs);

int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write);

#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len)   \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len)  \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len)  \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

/* Converts ns to s390's clock format */
static inline uint64_t time2tod(uint64_t ns) {
    return (ns << 9) / 125;
}

/* Converts s390's clock format to ns */
static inline uint64_t tod2time(uint64_t t) {
    return (t * 125) >> 9;
}
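
/*
 * Illustrative example (not part of the original header): one TOD clock
 * unit is 2^-12 microseconds, i.e. 4096 units per microsecond, so
 * time2tod(1000) returns 4096 and tod2time(4096) returns 1000 ns.
 */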
static inline void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
                                  uint64_t param64)
{
    CPUS390XState *env = &cpu->env;

    if (env->ext_index == MAX_EXT_QUEUE - 1) {
        /* ugh - can't queue anymore. Let's drop. */
        return;
    }

    env->ext_index++;
    assert(env->ext_index < MAX_EXT_QUEUE);

    env->ext_queue[env->ext_index].code = code;
    env->ext_queue[env->ext_index].param = param;
    env->ext_queue[env->ext_index].param64 = param64;

    env->pending_int |= INTERRUPT_EXT;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
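
/*
 * Illustrative usage sketch (not from the original header): a caller
 * delivering a service-signal external interrupt with parameter parm
 * could queue it roughly like this:
 *
 *     cpu_inject_ext(cpu, EXT_SERVICE, parm, 0);
 *
 * The queued code/param pair is copied into the lowcore external
 * interrupt fields when the interrupt is actually taken.
 */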
static inline void cpu_inject_io(S390CPU *cpu, uint16_t subchannel_id,
                                 uint16_t subchannel_number,
                                 uint32_t io_int_parm, uint32_t io_int_word)
{
    CPUS390XState *env = &cpu->env;
    int isc = IO_INT_WORD_ISC(io_int_word);

    if (env->io_index[isc] == MAX_IO_QUEUE - 1) {
        /* ugh - can't queue anymore. Let's drop. */
        return;
    }

    env->io_index[isc]++;
    assert(env->io_index[isc] < MAX_IO_QUEUE);

    env->io_queue[env->io_index[isc]][isc].id = subchannel_id;
    env->io_queue[env->io_index[isc]][isc].nr = subchannel_number;
    env->io_queue[env->io_index[isc]][isc].parm = io_int_parm;
    env->io_queue[env->io_index[isc]][isc].word = io_int_word;

    env->pending_int |= INTERRUPT_IO;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
static inline void cpu_inject_crw_mchk(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;

    if (env->mchk_index == MAX_MCHK_QUEUE - 1) {
        /* ugh - can't queue anymore. Let's drop. */
        return;
    }

    env->mchk_index++;
    assert(env->mchk_index < MAX_MCHK_QUEUE);

    env->mchk_queue[env->mchk_index].type = 1;

    env->pending_int |= INTERRUPT_MCHK;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
/* from s390-virtio-ccw */
#define MEM_SECTION_SIZE 0x10000000UL
#define MAX_AVAIL_SLOTS 32

/* fpu_helper.c */
uint32_t set_cc_nz_f32(float32 v);
uint32_t set_cc_nz_f64(float64 v);
uint32_t set_cc_nz_f128(float128 v);

/* misc_helper.c */
#ifndef CONFIG_USER_ONLY
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
#endif
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr);
#ifdef CONFIG_KVM
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word);
void kvm_s390_crw_mchk(void);
void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign);
int kvm_s390_cpu_restart(S390CPU *cpu);
int kvm_s390_get_memslot_count(KVMState *s);
void kvm_s390_clear_cmma_callback(void *opaque);
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
void kvm_s390_reset_vcpu(S390CPU *cpu);
int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit);
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
#else
static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
                                         uint16_t subchannel_nr,
                                         uint32_t io_int_parm,
                                         uint32_t io_int_word)
{
}
static inline void kvm_s390_crw_mchk(void)
{
}
static inline void kvm_s390_enable_css_support(S390CPU *cpu)
{
}
static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                                  uint32_t sch, int vq,
                                                  bool assign)
{
    return -ENOSYS;
}
static inline int kvm_s390_cpu_restart(S390CPU *cpu)
{
    return -ENOSYS;
}
static inline void kvm_s390_clear_cmma_callback(void *opaque)
{
}
static inline int kvm_s390_get_memslot_count(KVMState *s)
{
    return MAX_AVAIL_SLOTS;
}
static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    return -ENOSYS;
}
static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
{
}
static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit,
                                         uint64_t *hw_limit)
{
    return 0;
}
static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
}
static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    return 0;
}
#endif
static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    if (kvm_enabled()) {
        return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit);
    }
    return 0;
}

static inline void cmma_reset(S390CPU *cpu)
{
    if (kvm_enabled()) {
        CPUState *cs = CPU(cpu);
        kvm_s390_clear_cmma_callback(cs->kvm_state);
    }
}

static inline int s390_cpu_restart(S390CPU *cpu)
{
    if (kvm_enabled()) {
        return kvm_s390_cpu_restart(cpu);
    }
    return -ENOSYS;
}

static inline int s390_get_memslot_count(KVMState *s)
{
    if (kvm_enabled()) {
        return kvm_s390_get_memslot_count(s);
    } else {
        return MAX_AVAIL_SLOTS;
    }
}

void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
                       uint32_t io_int_parm, uint32_t io_int_word);
void s390_crw_mchk(void);

static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                              uint32_t sch_id, int vq,
                                              bool assign)
{
    return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
}

#ifdef CONFIG_KVM
static inline bool vregs_needed(void *opaque)
{
    if (kvm_enabled()) {
        return kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS);
    }
    return 0;
}
#else
static inline bool vregs_needed(void *opaque)
{
    return 0;
}
#endif

#endif