/*
 * s390x SIGP instruction handling
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/sysemu.h"
#include "trace.h"

QemuMutex qemu_sigp_mutex;

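/*
 * Per-order bookkeeping handed to the individual order handlers: the order
 * parameter, the resulting condition code, and a pointer to the caller's
 * status register (for orders that store status bits).
 */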
typedef struct SigpInfo {
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

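/*
 * Store status bits in the lower 32 bits of the caller's status register
 * (the upper half is preserved) and report "status stored" as the
 * condition code.
 */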
static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}

static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
{
    uint8_t state = s390_cpu_get_state(dst_cpu);
    bool ext_call = dst_cpu->env.pending_int & INTERRUPT_EXTERNAL_CALL;
    uint64_t status = 0;

    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* sensing without locks is racy, but it's the same for real hw */
    if (state != CPU_STATE_STOPPED && !ext_call) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
    } else {
        if (ext_call) {
            status |= SIGP_STAT_EXT_CALL_PENDING;
        }
        if (state == CPU_STATE_STOPPED) {
            status |= SIGP_STAT_STOPPED;
        }
        set_sigp_status(si, status);
    }
}

static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
{
    int ret;

    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    ret = cpu_inject_external_call(dst_cpu, src_cpu->env.core_id);
    if (!ret) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
    } else {
        set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING);
    }
}

static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
{
    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_start(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    } else {
        /* execute the stop function */
        cpu->env.sigp_order = SIGP_STOP;
        cpu_inject_stop(cpu);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    }

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_OPERATING:
        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        cpu_inject_stop(cpu);
        /* store will be performed in do_stop_interrupt() */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(cs);
        s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(cs);

    if (s390_store_status(cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

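/*
 * STORE ADDITIONAL STATUS AT ADDRESS: the low four bits of the parameter
 * encode a length code (lc); lc == 0 designates the default 1 KiB block,
 * otherwise the block is 1 << lc bytes long.
 */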
#define ADTL_SAVE_LC_MASK  0xfUL
static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint8_t lc = si->param & ADTL_SAVE_LC_MASK;
    hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK;
    hwaddr len = 1UL << (lc ? lc : 10);

    if (!s390_has_feat(S390_FEAT_VECTOR) &&
        !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    /* address must be aligned to length */
    if (addr & (len - 1)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* no GS: only lc == 0 is valid */
    if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
        lc != 0) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* GS: 0, 10, 11, 12 are valid */
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
        lc != 0 &&
        lc != 10 &&
        lc != 11 &&
        lc != 12) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    cpu_synchronize_state(cs);

    if (s390_store_adtl_status(cpu, addr, len)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(cs);
        /*
         * Set OPERATING (and unhalting) before loading the restart PSW.
         * load_psw() will then properly halt the CPU again if necessary (TCG).
         */
        s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
        do_restart_interrupt(&cpu->env);
        break;
    case CPU_STATE_OPERATING:
        cpu_inject_restart(cpu);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

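/*
 * SET PREFIX: update the target's prefix (PSA) after checking that the new
 * prefix area is backed by accessible memory and that the CPU is stopped.
 */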
static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(cs);

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu->env.psa = addr;
    tlb_flush(cs);
    cpu_synchronize_post_init(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
                                SigpInfo *si)
{
    const uint64_t psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
    uint16_t p_asn, s_asn, asn;
    uint64_t psw_addr, psw_mask;
    bool idle;

    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* this looks racy, but these values are only used when STOPPED */
    idle = CPU(dst_cpu)->halted;
    psw_addr = dst_cpu->env.psw.addr;
    psw_mask = dst_cpu->env.psw.mask;
    asn = si->param;
    p_asn = dst_cpu->env.cregs[4] & 0xffff;  /* Primary ASN */
    s_asn = dst_cpu->env.cregs[3] & 0xffff;  /* Secondary ASN */

    if (s390_cpu_get_state(dst_cpu) != CPU_STATE_STOPPED ||
        (psw_mask & psw_int_mask) != psw_int_mask ||
        (idle && psw_addr != 0) ||
        (!idle && (asn == p_asn || asn == s_asn))) {
        cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
    } else {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
    }

    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
{
    if (!tcg_enabled()) {
        /* handled in KVM */
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* sensing without locks is racy, but it's the same for real hw */
    if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* If halted (which includes also STOPPED), it is not running */
    if (CPU(dst_cpu)->halted) {
        set_sigp_status(si, SIGP_STAT_NOT_RUNNING);
    } else {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
    }
}

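/*
 * Dispatch a SIGP order that targets a single destination CPU.  Sense- and
 * signal-style orders are handled directly; orders that change the
 * destination's state are run on the target vCPU via run_on_cpu().  The
 * resulting condition code is returned from si.cc.
 */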
static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_SENSE:
        sigp_sense(dst_cpu, &si);
        break;
    case SIGP_EXTERNAL_CALL:
        sigp_external_call(cpu, dst_cpu, &si);
        break;
    case SIGP_EMERGENCY:
        sigp_emergency(cpu, dst_cpu, &si);
        break;
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_ADTL_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_COND_EMERGENCY:
        sigp_cond_emergency(cpu, dst_cpu, &si);
        break;
    case SIGP_SENSE_RUNNING:
        sigp_sense_running(dst_cpu, &si);
        break;
    default:
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}

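/*
 * SET ARCHITECTURE is always rejected: with the czam facility the machine
 * runs in z/Architecture mode only.  The stored status only depends on
 * whether all other CPUs are stopped.
 */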
static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;
    bool all_stopped = true;

    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);

        if (cur_cpu == cpu) {
            continue;
        }
        if (s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            all_stopped = false;
        }
    }

    *status_reg &= 0xffffffff00000000ULL;

    /* Reject set arch order, with czam we're always in z/Arch mode. */
    *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER :
                    SIGP_STAT_INCORRECT_STATE);
    return SIGP_CC_STATUS_STORED;
}

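/*
 * Entry point for the SIGP instruction.  r1 designates the status register;
 * the parameter lives in r1 (odd r1) or r1 + 1 (even r1), and r3 holds the
 * destination CPU address.  Concurrent SIGP orders are serialized with a
 * trylock; if another order is in flight, condition code "busy" is returned.
 */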
int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3)
{
    uint64_t *status_reg = &env->regs[r1];
    uint64_t param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
    S390CPU *cpu = s390_env_get_cpu(env);
    S390CPU *dst_cpu = NULL;
    int ret;

    if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
        ret = SIGP_CC_BUSY;
        goto out;
    }

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg);
    }
    qemu_mutex_unlock(&qemu_sigp_mutex);

out:
    trace_sigp_finished(order, CPU(cpu)->cpu_index,
                        dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
    g_assert(ret >= 0);

    return ret;
}

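/* Kick a CPU through the SIGP RESTART handler; exported for callers outside
 * the SIGP instruction path. */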
int s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {};

    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
    return 0;
}

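/*
 * Completion of a pending STOP order, called when the stop interrupt is
 * delivered: stop the CPU (requesting shutdown once no CPU remains
 * operating) and, for STOP AND STORE STATUS, store the status at the
 * default address.
 */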
void do_stop_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
    }
    if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
        s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
    }
    env->sigp_order = 0;
    env->pending_int &= ~INTERRUPT_STOP;
}

void s390_init_sigp(void)
{
    qemu_mutex_init(&qemu_sigp_mutex);
}