target/s390x/misc_helper.c
/*
 *  S/390 misc helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here,
     * but we don't care, as it is up to the OS when to process that
     * interrupt and reset to > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

#ifndef CONFIG_USER_ONLY

/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
    if (r < 0) {
        s390_program_interrupt(env, -r, 4, GETPC());
    }
    return r;
}

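/*
 * DIAGNOSE: dispatch the diagnose calls we emulate (0x500 KVM/virtio
 * hypercall, 0x44 yield, 0x308 IPL, 0x288 watchdog); any other function
 * code raises a specification exception.
 */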
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t prefix = a1 & 0x7fffe000;

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
    uint64_t time;

    time = env->tod_offset +
        time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);

    return time;
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    env->ckc = time;

    /* difference between origins */
    time -= env->tod_offset;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, env->tod_basetime + time);
}

/* Set Tod Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

    if (val & 0xffff0000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
                      uint64_t r0, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, GETPC());
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;
            char type[5] = {};

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->core_id);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        {
            if ((sel1 == 2) && (sel2 == 1)) {
                /* LPAR CPU */
                struct sysib_221 sysib;

                memset(&sysib, 0, sizeof(sysib));
                /* XXX make different for different CPUs? */
                ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
                ebcdic_put(sysib.plant, "QEMU", 4);
                stw_p(&sysib.cpu_addr, env->core_id);
                stw_p(&sysib.cpu_id, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else if ((sel1 == 2) && (sel2 == 2)) {
                /* LPAR CPUs */
                struct sysib_222 sysib;

                memset(&sysib, 0, sizeof(sysib));
                stw_p(&sysib.lpar_num, 0);
                sysib.lcpuc = 0;
                /* XXX change when SMP comes */
                stw_p(&sysib.total_cpus, 1);
                stw_p(&sysib.conf_cpus, 1);
                stw_p(&sysib.standby_cpus, 0);
                stw_p(&sysib.reserved_cpus, 0);
                ebcdic_put(sysib.name, "QEMU    ", 8);
                stl_p(&sysib.caf, 1000);
                stw_p(&sysib.dedicated_cpus, 0);
                stw_p(&sysib.shared_cpus, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_3:
        {
            if ((sel1 == 2) && (sel2 == 2)) {
                /* VM CPUs */
                struct sysib_322 sysib;

                memset(&sysib, 0, sizeof(sysib));
                sysib.count = 1;
                /* XXX change when SMP comes */
                stw_p(&sysib.vm[0].total_cpus, 1);
                stw_p(&sysib.vm[0].conf_cpus, 1);
                stw_p(&sysib.vm[0].standby_cpus, 0);
                stw_p(&sysib.vm[0].reserved_cpus, 0);
                ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
                stl_p(&sysib.vm[0].caf, 1000);
                ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}

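/*
 * SIGNAL PROCESSOR: forwarded to the common SIGP handler while holding
 * the iothread lock.
 */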
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
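/*
 * Channel-subsystem I/O instructions: each helper takes the iothread lock
 * and forwards to the matching ioinst_handle_*() routine.
 */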
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
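/*
 * Program Event Recording (PER): deliver a PER program interrupt if an
 * event was recorded in per_perc_atmid.
 */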
void HELPER(per_check_exception)(CPUS390XState *env)
{
    uint32_t ilen;

    if (env->per_perc_atmid) {
        /*
         * FIXME: ILEN_AUTO is most probably the right thing to use. ilen
         * always has to match the instruction referenced in the PSW. E.g.
         * if a PER interrupt is triggered via EXECUTE, we have to use ilen
         * of EXECUTE, while per_address contains the target of EXECUTE.
         */
        ilen = get_ilen(cpu_ldub_code(env, env->per_address));
        s390_program_interrupt(env, PGM_PER, ilen, GETPC());
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address. The address range might loop. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

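/*
 * Branch event: record a PER branch event when branch tracing is enabled
 * in CR9 and, if branch-address control is set, the target lies within
 * the CR10/CR11 range.
 */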
void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

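/*
 * Instruction-fetch event: record a PER ifetch event for addresses in the
 * CR10/CR11 range; with nullification enabled the exception is raised
 * immediately.
 */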
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = CPU(s390_env_get_cpu(env));

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

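/*
 * Fill the cached STFL(E) facility-list bytes from the CPU model on first
 * use and remember how many bytes are actually non-zero.
 */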
static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

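/*
 * STORE FACILITY LIST EXTENDED: store the facility bits at addr; the low
 * byte of R0 is updated with the number of doublewords needed minus 1.
 * Returns cc 0 if the complete list was stored, cc 3 otherwise.
 */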
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    const int max_bytes = ROUND_UP(used_stfl_bytes, 8);
    int i;

    if (addr & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    prepare_stfl();
    for (i = 0; i < count_bytes; ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}