[qemu.git] / target-s390x / misc_helper.c
blob 10d04252d58c9c54b3acaf8e77343a6aecbc2d91
/*
 * S/390 misc helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "helper.h"
#include <string.h>
#include "sysemu/kvm.h"
#include "qemu/timer.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception dynamically from a helper function. */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr)
{
    int t;

    env->exception_index = EXCP_PGM;
    env->int_pgm_code = excp;

    /* Use the (ultimate) caller's address to find the insn that trapped. */
    cpu_restore_state(env, retaddr);

    /* Advance past the insn. */
    t = cpu_ldub_code(env, env->psw.addr);
    env->int_pgm_ilen = t = get_ilen(t);
    env->psw.addr += 2 * t;

    cpu_loop_exit(env);
}

/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    HELPER_LOG("%s: exception %d\n", __func__, excp);
    env->exception_index = excp;
    cpu_loop_exit(env);
}

#ifndef CONFIG_USER_ONLY

void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
{
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);

    if (kvm_enabled()) {
#ifdef CONFIG_KVM
        kvm_s390_interrupt(s390_env_get_cpu(env), KVM_S390_PROGRAM_INT, code);
#endif
    } else {
        env->int_pgm_code = code;
        env->int_pgm_ilen = ilen;
        env->exception_index = EXCP_PGM;
        cpu_loop_exit(env);
    }
}

/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    int r = sclp_service_call(r1, r2);
    if (r < 0) {
        program_interrupt(env, -r, 4);
        return 0;
    }
    return r;
}

#ifndef CONFIG_USER_ONLY
static void cpu_reset_all(void)
{
    CPUState *cs;
    S390CPUClass *scc;

    CPU_FOREACH(cs) {
        scc = S390_CPU_GET_CLASS(cs);
        scc->cpu_reset(cs);
    }
}

static void cpu_full_reset_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_reset(cpu);
    }
}

static int modified_clear_reset(S390CPU *cpu)
{
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    pause_all_vcpus();
    cpu_synchronize_all_states();
    cpu_full_reset_all();
    io_subsystem_reset();
    scc->load_normal(CPU(cpu));
    cpu_synchronize_all_post_reset();
    resume_all_vcpus();
    return 0;
}

static int load_normal_reset(S390CPU *cpu)
{
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    pause_all_vcpus();
    cpu_synchronize_all_states();
    cpu_reset_all();
    io_subsystem_reset();
    scc->initial_cpu_reset(CPU(cpu));
    scc->load_normal(CPU(cpu));
    cpu_synchronize_all_post_reset();
    resume_all_vcpus();
    return 0;
}

#define DIAG_308_RC_NO_CONF         0x0102
#define DIAG_308_RC_INVALID         0x0402
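/*
 * DIAGNOSE 0x308: IPL functions.  Subcodes 0 and 1 trigger a
 * modified-clear and a load-normal reset respectively; subcodes 5 and 6
 * (set/store IPL information) are not implemented here and report the
 * return codes defined above.  Problem-state callers get a
 * privileged-operation exception; bad subcodes or operands a
 * specification exception.
 */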
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    uint64_t addr = env->regs[r1];
    uint64_t subcode = env->regs[r3];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
        return;
    }

    if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
        return;
    }

    switch (subcode) {
    case 0:
        modified_clear_reset(s390_env_get_cpu(env));
        break;
    case 1:
        load_normal_reset(s390_env_get_cpu(env));
        break;
    case 5:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        env->regs[r1 + 1] = DIAG_308_RC_INVALID;
        return;
    case 6:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
        return;
    default:
        hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
        break;
    }
}
#endif

/* DIAG */
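/*
 * Dispatch on the DIAGNOSE function code: 0x500 is the s390-virtio
 * hypercall, 0x44 is a voluntary time-slice yield, and 0x308 (IPL) is
 * accepted as a no-op here.  Any non-zero result, including the default
 * for unknown codes, raises an operation exception.
 */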
uint64_t HELPER(diag)(CPUS390XState *env, uint32_t num, uint64_t mem,
                      uint64_t code)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        r = s390_virtio_hypercall(env);
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        r = 0;
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        program_interrupt(env, PGM_OPERATION, ILEN_LATER_INC);
    }

    return r;
}

/* Set Prefix */
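/*
 * The prefix register relocates the first 8 KB of real storage (the
 * low-core area), so only 8 KB-aligned values are accepted.  The two
 * pages that make up low core must be flushed from the TLB after the
 * prefix changes.
 */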
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    uint32_t prefix = a1 & 0x7fffe000;
    env->psa = prefix;
    qemu_log("prefix: %#x\n", prefix);
    tlb_flush_page(env, 0);
    tlb_flush_page(env, TARGET_PAGE_SIZE);
}

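/*
 * The TOD clock ticks in units of 2^-12 microseconds (4096 units per
 * microsecond), so converting a TOD value to nanoseconds means
 * multiplying by 1000/4096, i.e. (value * 125) >> 9 as done in the
 * clock comparator and CPU timer helpers below.  clock_value() composes
 * the guest's TOD offset with the elapsed virtual time.
 */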
static inline uint64_t clock_value(CPUS390XState *env)
{
    uint64_t time;

    time = env->tod_offset +
        time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);

    return time;
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
    return clock_value(env);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* difference between now and then */
    time -= clock_value(env);
    /* nanoseconds */
    time = (time * 125) >> 9;

    timer_mod(env->tod_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time);
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    /* XXX implement */
    return 0;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = (time * 125) >> 9;

    timer_mod(env->cpu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time);
}

/* Store CPU Timer */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
    /* XXX implement */
    return 0;
}

/* Store System Information */
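/*
 * The function code in r0 selects the SYSIB level (1 = basic machine,
 * 2 = LPAR, 3 = VM), sel1/sel2 pick the specific block, and the result
 * is written to the real address in a0.  Condition code 3 means the
 * requested block is not available.
 */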
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
                      uint64_t r0, uint64_t r1)
{
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 2);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID */
            ebcdic_put(sysib.type, "QEMU", 4);
            /* same as model number in STORE CPU ID */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        {
            if ((sel1 == 2) && (sel2 == 1)) {
                /* LPAR CPU */
                struct sysib_221 sysib;

                memset(&sysib, 0, sizeof(sysib));
                /* XXX make different for different CPUs? */
                ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
                ebcdic_put(sysib.plant, "QEMU", 4);
                stw_p(&sysib.cpu_addr, env->cpu_num);
                stw_p(&sysib.cpu_id, 0);
                cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
            } else if ((sel1 == 2) && (sel2 == 2)) {
                /* LPAR CPUs */
                struct sysib_222 sysib;

                memset(&sysib, 0, sizeof(sysib));
                stw_p(&sysib.lpar_num, 0);
                sysib.lcpuc = 0;
                /* XXX change when SMP comes */
                stw_p(&sysib.total_cpus, 1);
                stw_p(&sysib.conf_cpus, 1);
                stw_p(&sysib.standby_cpus, 0);
                stw_p(&sysib.reserved_cpus, 0);
                ebcdic_put(sysib.name, "QEMU    ", 8);
                stl_p(&sysib.caf, 1000);
                stw_p(&sysib.dedicated_cpus, 0);
                stw_p(&sysib.shared_cpus, 0);
                cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_3:
        {
            if ((sel1 == 2) && (sel2 == 2)) {
                /* VM CPUs */
                struct sysib_322 sysib;

                memset(&sysib, 0, sizeof(sysib));
                sysib.count = 1;
                /* XXX change when SMP comes */
                stw_p(&sysib.vm[0].total_cpus, 1);
                stw_p(&sysib.vm[0].conf_cpus, 1);
                stw_p(&sysib.vm[0].standby_cpus, 0);
                stw_p(&sysib.vm[0].reserved_cpus, 0);
                ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
                stl_p(&sysib.vm[0].caf, 1000);
                ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
                cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}

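/*
 * SIGNAL PROCESSOR: only a minimal subset of orders is emulated for the
 * single-CPU case; anything unknown simply sets condition code 3.
 */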
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint64_t cpu_addr)
{
    int cc = 0;

    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
               __func__, order_code, r1, cpu_addr);

    /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
       as parameter (input). Status (output) is always R1. */

    switch (order_code) {
    case SIGP_SET_ARCH:
        /* switch arch */
        break;
    case SIGP_SENSE:
        /* enumerate CPU status */
        if (cpu_addr) {
            /* XXX implement when SMP comes */
            return 3;
        }
        env->regs[r1] &= 0xffffffff00000000ULL;
        cc = 1;
        break;
#if !defined(CONFIG_USER_ONLY)
    case SIGP_RESTART:
        qemu_system_reset_request();
        cpu_loop_exit(env);
        break;
    case SIGP_STOP:
        qemu_system_shutdown_request();
        cpu_loop_exit(env);
        break;
#endif
    default:
        /* unknown sigp */
        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
        cc = 3;
    }

    return cc;
}
#endif