target/s390x/misc_helper.c
/*
 * S/390 misc helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception dynamically from a helper function. */
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = excp;
    env->int_pgm_ilen = ILEN_AUTO;

    /* Use the (ultimate) caller's address to find the insn that trapped. */
    cpu_restore_state(cs, retaddr);

    cpu_loop_exit(cs);
}

/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

#ifndef CONFIG_USER_ONLY

/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    if (r < 0) {
        program_interrupt(env, -r, 4);
        r = 0;
    }
    qemu_mutex_unlock_iothread();
    return r;
}

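/* DIAGNOSE: dispatch the diagnose codes emulated by QEMU (0x44 yield,
   0x288 watchdog, 0x308 IPL, 0x500 KVM/virtio hypercall); an unknown code
   or a failing handler raises a specification exception. */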
void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        handle_diag_308(env, r1, r3);
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t prefix = a1 & 0x7fffe000;

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
    /* Changing the prefix remaps the lowcore, so drop the stale
       translations for the first two pages from the TLB. */
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
    uint64_t time;

    time = env->tod_offset +
        time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);

    return time;
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    env->ckc = time;

    /* difference between origins */
    time -= env->tod_offset;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, env->tod_basetime + time);
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store CPU Timer */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
                      uint64_t r0, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 4);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;
            char type[5] = {};

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->core_id);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        {
            if ((sel1 == 2) && (sel2 == 1)) {
                /* LPAR CPU */
                struct sysib_221 sysib;

                memset(&sysib, 0, sizeof(sysib));
                /* XXX make different for different CPUs? */
                ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
                ebcdic_put(sysib.plant, "QEMU", 4);
                stw_p(&sysib.cpu_addr, env->core_id);
                stw_p(&sysib.cpu_id, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else if ((sel1 == 2) && (sel2 == 2)) {
                /* LPAR CPUs */
                struct sysib_222 sysib;

                memset(&sysib, 0, sizeof(sysib));
                stw_p(&sysib.lpar_num, 0);
                sysib.lcpuc = 0;
                /* XXX change when SMP comes */
                stw_p(&sysib.total_cpus, 1);
                stw_p(&sysib.conf_cpus, 1);
                stw_p(&sysib.standby_cpus, 0);
                stw_p(&sysib.reserved_cpus, 0);
                ebcdic_put(sysib.name, "QEMU    ", 8);
                stl_p(&sysib.caf, 1000);
                stw_p(&sysib.dedicated_cpus, 0);
                stw_p(&sysib.shared_cpus, 0);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_3:
        {
            if ((sel1 == 2) && (sel2 == 2)) {
                /* VM CPUs */
                struct sysib_322 sysib;

                memset(&sysib, 0, sizeof(sysib));
                sysib.count = 1;
                /* XXX change when SMP comes */
                stw_p(&sysib.vm[0].total_cpus, 1);
                stw_p(&sysib.vm[0].conf_cpus, 1);
                stw_p(&sysib.vm[0].standby_cpus, 0);
                stw_p(&sysib.vm[0].reserved_cpus, 0);
                ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
                stl_p(&sysib.vm[0].caf, 1000);
                ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
                cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
            } else {
                cc = 3;
            }
            break;
        }
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}

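/* SIGNAL PROCESSOR: hand the masked order code to the common SIGP handler */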
uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
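/* Channel-subsystem I/O instruction helpers: each one simply forwards to
   the corresponding ioinst_handle_*() routine under the iothread lock. */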
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1);
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16);
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16);
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
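/* Raise a program interrupt for any PER event recorded in per_perc_atmid. */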
void HELPER(per_check_exception)(CPUS390XState *env)
{
    uint32_t ilen;

    if (env->per_perc_atmid) {
        /*
         * FIXME: ILEN_AUTO is most probably the right thing to use. ilen
         * always has to match the instruction referenced in the PSW. E.g.
         * if a PER interrupt is triggered via EXECUTE, we have to use ilen
         * of EXECUTE, while per_address contains the target of EXECUTE.
         */
        ilen = get_ilen(cpu_ldub_code(env, env->per_address));
        program_interrupt(env, PGM_PER, ilen);
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address. The address range might loop. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

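/* PER successful-branching event: record it if the branch-address control
   is off or the branch target lies within the PER address range. */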
void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

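/* PER instruction-fetching event: record it and, for nullifying events,
   raise the program interrupt immediately. */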
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = CPU(s390_env_get_cpu(env));

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}
#endif

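/* Facility bits as presented by STFL/STFLE, filled in once from the
   active CPU model by prepare_stfl(). */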
static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

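/* STORE FACILITY LIST EXTENDED: bits 56-63 of r0 hold the number of
   doublewords to store minus one and are updated to the number of
   doublewords needed minus one; cc 3 means the list was truncated. */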
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    const int max_bytes = ROUND_UP(used_stfl_bytes, 8);
    int i;

    if (addr & 0x7) {
        cpu_restore_state(ENV_GET_CPU(env), ra);
        program_interrupt(env, PGM_SPECIFICATION, 4);
    }

    prepare_stfl();
    for (i = 0; i < count_bytes; ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}