target/s390x: rework PGM interrupt psw.addr handling
[qemu/ar7.git] / target / s390x / misc_helper.c
blob4daa01632e70e87640fb22995031bd5d72dc1618
1 /*
2 * S/390 misc helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "exec/memory.h"
25 #include "qemu/host-utils.h"
26 #include "exec/helper-proto.h"
27 #include "sysemu/kvm.h"
28 #include "qemu/timer.h"
29 #include "qemu/main-loop.h"
30 #include "exec/address-spaces.h"
31 #ifdef CONFIG_KVM
32 #include <linux/kvm.h>
33 #endif
34 #include "exec/exec-all.h"
35 #include "exec/cpu_ldst.h"
37 #if !defined(CONFIG_USER_ONLY)
38 #include "hw/watchdog/wdt_diag288.h"
39 #include "sysemu/cpus.h"
40 #include "sysemu/sysemu.h"
41 #include "hw/s390x/ebcdic.h"
42 #include "hw/s390x/ipl.h"
43 #endif
45 /* #define DEBUG_HELPER */
46 #ifdef DEBUG_HELPER
/* Trace helper entry/exit to the qemu log; compiled out by default. */
47 #define HELPER_LOG(x...) qemu_log(x)
48 #else
49 #define HELPER_LOG(x...)
50 #endif
52 /* Raise an exception dynamically from a helper function. */
53 void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
54 uintptr_t retaddr)
56 CPUState *cs = CPU(s390_env_get_cpu(env));
/* Queue a program interrupt; the instruction length will be derived
   automatically (ILEN_AUTO) once the guest PC is known. */
58 cs->exception_index = EXCP_PGM;
59 env->int_pgm_code = excp;
60 env->int_pgm_ilen = ILEN_AUTO;
62 /* Use the (ultimate) callers address to find the insn that trapped. */
63 cpu_restore_state(cs, retaddr);
/* Never returns: longjmps back to the cpu main loop. */
65 cpu_loop_exit(cs);
68 /* Raise an exception statically from a TB. */
69 void HELPER(exception)(CPUS390XState *env, uint32_t excp)
71 CPUState *cs = CPU(s390_env_get_cpu(env));
73 HELPER_LOG("%s: exception %d\n", __func__, excp);
/* The TB already synchronized the PSW, so no state restore is needed. */
74 cs->exception_index = excp;
75 cpu_loop_exit(cs);
/* Deliver a program interrupt with the given code.  Under KVM the
   interrupt is injected into the vcpu; under TCG it is queued on env
   and the cpu loop is exited (does not return in that case). */
78 void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
80 S390CPU *cpu = s390_env_get_cpu(env);
82 qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
83 env->psw.addr);
85 if (kvm_enabled()) {
86 #ifdef CONFIG_KVM
/* KVM computes the ilen itself, so only the code is passed on. */
87 struct kvm_s390_irq irq = {
88 .type = KVM_S390_PROGRAM_INT,
89 .u.pgm.code = code,
92 kvm_s390_vcpu_interrupt(cpu, &irq);
93 #endif
94 } else {
95 CPUState *cs = CPU(cpu);
97 env->int_pgm_code = code;
98 env->int_pgm_ilen = ilen;
99 cs->exception_index = EXCP_PGM;
100 cpu_loop_exit(cs);
104 #ifndef CONFIG_USER_ONLY
106 /* SCLP service call */
107 uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
/* sclp_service_call touches device/board state, so hold the BQL. */
109 qemu_mutex_lock_iothread();
110 int r = sclp_service_call(env, r1, r2);
111 if (r < 0) {
/* Negative return encodes a program-interrupt code; inject it and
   report cc 0 to the caller. */
112 program_interrupt(env, -r, 4);
113 r = 0;
115 qemu_mutex_unlock_iothread();
116 return r;
119 #ifndef CONFIG_USER_ONLY
/* Perform a diag308 subcode-0 "modified clear" reset: full-reset every
   cpu, clear storage/subsystem/crypto state, then reload the IPL image
   on the requesting cpu. */
120 static int modified_clear_reset(S390CPU *cpu)
122 S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
123 CPUState *t;
125 pause_all_vcpus();
126 cpu_synchronize_all_states();
127 CPU_FOREACH(t) {
128 run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
130 s390_cmma_reset();
131 subsystem_reset();
132 s390_crypto_reset();
133 scc->load_normal(CPU(cpu));
134 cpu_synchronize_all_post_reset();
135 resume_all_vcpus();
136 return 0;
/* Perform a diag308 subcode-1 "load normal" reset: normal-reset every
   cpu, reset the subsystem, then initial-reset and re-IPL the
   requesting cpu. */
139 static int load_normal_reset(S390CPU *cpu)
141 S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
142 CPUState *t;
144 pause_all_vcpus();
145 cpu_synchronize_all_states();
146 CPU_FOREACH(t) {
147 run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
149 s390_cmma_reset();
150 subsystem_reset();
151 scc->initial_cpu_reset(CPU(cpu));
152 scc->load_normal(CPU(cpu));
153 cpu_synchronize_all_post_reset();
154 resume_all_vcpus();
155 return 0;
/* DIAG 288: guest watchdog control.  r1 (even) holds the function code,
   r1+1 the timeout in seconds, r3 the action.  Returns the watchdog
   device's result, or -1 on invalid parameters / missing device. */
158 int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
160 uint64_t func = env->regs[r1];
161 uint64_t timeout = env->regs[r1 + 1];
162 uint64_t action = env->regs[r3];
163 Object *obj;
164 DIAG288State *diag288;
165 DIAG288Class *diag288_class;
/* r1 must be an even register pair; only action 0 is supported. */
167 if (r1 % 2 || action != 0) {
168 return -1;
171 /* Timeout must be more than 15 seconds except for timer deletion */
172 if (func != WDT_DIAG288_CANCEL && timeout < 15) {
173 return -1;
/* Locate the (optional) diag288 watchdog device in the machine. */
176 obj = object_resolve_path_type("", TYPE_WDT_DIAG288, NULL);
177 if (!obj) {
178 return -1;
181 diag288 = DIAG288(obj);
182 diag288_class = DIAG288_GET_CLASS(diag288);
183 return diag288_class->handle_timer(diag288, func, timeout);
186 #define DIAG_308_RC_OK 0x0001
187 #define DIAG_308_RC_NO_CONF 0x0102
188 #define DIAG_308_RC_INVALID 0x0402
/* DIAG 308: IPL (re-boot) control.  r1 holds the IPL parameter block
   address (subcodes 5/6), r3 the subcode.  Return codes are stored in
   r1+1 for subcodes 5/6. */
190 void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
192 uint64_t addr = env->regs[r1];
193 uint64_t subcode = env->regs[r3];
194 IplParameterBlock *iplb;
/* Privileged instruction: reject in problem state. */
196 if (env->psw.mask & PSW_MASK_PSTATE) {
197 program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO);
198 return;
201 if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
202 program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
203 return;
206 switch (subcode) {
207 case 0:
/* Modified clear reset + re-IPL. */
208 modified_clear_reset(s390_env_get_cpu(env));
209 if (tcg_enabled()) {
210 cpu_loop_exit(CPU(s390_env_get_cpu(env)));
212 break;
213 case 1:
/* Load normal reset + re-IPL. */
214 load_normal_reset(s390_env_get_cpu(env));
215 if (tcg_enabled()) {
216 cpu_loop_exit(CPU(s390_env_get_cpu(env)));
218 break;
219 case 3:
/* Request a full machine re-IPL. */
220 s390_reipl_request();
221 if (tcg_enabled()) {
222 cpu_loop_exit(CPU(s390_env_get_cpu(env)));
224 break;
225 case 5:
/* Set IPL parameter block.  r1 must be even, addr 4k-aligned. */
226 if ((r1 & 1) || (addr & 0x0fffULL)) {
227 program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
228 return;
230 if (!address_space_access_valid(&address_space_memory, addr,
231 sizeof(IplParameterBlock), false)) {
232 program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
233 return;
/* Read only the length field first, then the full block once the
   length has been validated. */
235 iplb = g_malloc0(sizeof(IplParameterBlock));
236 cpu_physical_memory_read(addr, iplb, sizeof(iplb->len));
237 if (!iplb_valid_len(iplb)) {
238 env->regs[r1 + 1] = DIAG_308_RC_INVALID;
239 goto out;
242 cpu_physical_memory_read(addr, iplb, be32_to_cpu(iplb->len));
244 if (!iplb_valid_ccw(iplb) && !iplb_valid_fcp(iplb)) {
245 env->regs[r1 + 1] = DIAG_308_RC_INVALID;
246 goto out;
249 s390_ipl_update_diag308(iplb);
250 env->regs[r1 + 1] = DIAG_308_RC_OK;
251 out:
252 g_free(iplb);
253 return;
254 case 6:
/* Store IPL parameter block to guest memory. */
255 if ((r1 & 1) || (addr & 0x0fffULL)) {
256 program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
257 return;
259 if (!address_space_access_valid(&address_space_memory, addr,
260 sizeof(IplParameterBlock), true)) {
261 program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
262 return;
264 iplb = s390_ipl_get_iplb();
265 if (iplb) {
266 cpu_physical_memory_write(addr, iplb, be32_to_cpu(iplb->len));
267 env->regs[r1 + 1] = DIAG_308_RC_OK;
268 } else {
269 env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
271 return;
272 default:
273 hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
274 break;
277 #endif
/* DIAGNOSE instruction dispatcher.  Unknown function codes raise a
   PGM_OPERATION program interrupt. */
279 void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
281 uint64_t r;
283 switch (num) {
284 case 0x500:
285 /* KVM hypercall */
/* virtio hypercall may touch device state -> take the BQL. */
286 qemu_mutex_lock_iothread();
287 r = s390_virtio_hypercall(env);
288 qemu_mutex_unlock_iothread();
289 break;
290 case 0x44:
291 /* yield */
292 r = 0;
293 break;
294 case 0x308:
295 /* ipl */
296 handle_diag_308(env, r1, r3);
297 r = 0;
298 break;
299 default:
300 r = -1;
301 break;
/* Any non-zero result is reported as an operation exception. */
304 if (r) {
305 program_interrupt(env, PGM_OPERATION, ILEN_AUTO);
309 /* Set Prefix */
310 void HELPER(spx)(CPUS390XState *env, uint64_t a1)
312 CPUState *cs = CPU(s390_env_get_cpu(env));
/* Prefix is 8k-aligned (low 13 bits ignored), 31-bit addressable. */
313 uint32_t prefix = a1 & 0x7fffe000;
315 env->psa = prefix;
316 HELPER_LOG("prefix: %#x\n", prefix);
/* The two low pages are remapped via the prefix, so drop any stale
   translations for them. */
317 tlb_flush_page(cs, 0);
318 tlb_flush_page(cs, TARGET_PAGE_SIZE);
321 /* Store Clock */
322 uint64_t HELPER(stck)(CPUS390XState *env)
324 uint64_t time;
/* TOD clock = guest offset + elapsed virtual time in TOD units. */
326 time = env->tod_offset +
327 time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);
329 return time;
332 /* Set Clock Comparator */
333 void HELPER(sckc)(CPUS390XState *env, uint64_t time)
/* An all-ones comparator is ignored here; presumably it means "no
   comparator interrupt requested" — confirm against the PoP. */
335 if (time == -1ULL) {
336 return;
339 env->ckc = time;
341 /* difference between origins */
342 time -= env->tod_offset;
344 /* nanoseconds */
345 time = tod2time(time);
/* Arm the timer to fire when the TOD clock passes the comparator. */
347 timer_mod(env->tod_timer, env->tod_basetime + time);
350 /* Store Clock Comparator */
351 uint64_t HELPER(stckc)(CPUS390XState *env)
353 return env->ckc;
356 /* Set CPU Timer */
357 void HELPER(spt)(CPUS390XState *env, uint64_t time)
/* All-ones is ignored, mirroring sckc above. */
359 if (time == -1ULL) {
360 return;
363 /* nanoseconds */
364 time = tod2time(time);
/* cputm holds the absolute virtual-clock deadline. */
366 env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;
368 timer_mod(env->cpu_timer, env->cputm);
371 /* Store CPU Timer */
372 uint64_t HELPER(stpt)(CPUS390XState *env)
/* Remaining time until the deadline, converted back to TOD units. */
374 return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
377 /* Store System Information */
/* Writes a SYSIB block describing machine/LPAR/VM level info to guest
   address a0.  Selector values come from r0/r1; returns the condition
   code (0 on success, 3 for unsupported selector combinations). */
378 uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
379 uint64_t r0, uint64_t r1)
381 int cc = 0;
382 int sel1, sel2;
384 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
385 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
386 /* valid function code, invalid reserved bits */
387 program_interrupt(env, PGM_SPECIFICATION, 2);
390 sel1 = r0 & STSI_R0_SEL1_MASK;
391 sel2 = r1 & STSI_R1_SEL2_MASK;
393 /* XXX: spec exception if sysib is not 4k-aligned */
395 switch (r0 & STSI_LEVEL_MASK) {
396 case STSI_LEVEL_1:
397 if ((sel1 == 1) && (sel2 == 1)) {
398 /* Basic Machine Configuration */
399 struct sysib_111 sysib;
401 memset(&sysib, 0, sizeof(sysib));
/* All identity fields are EBCDIC-encoded, as the guest expects. */
402 ebcdic_put(sysib.manuf, "QEMU ", 16);
403 /* same as machine type number in STORE CPU ID */
404 ebcdic_put(sysib.type, "QEMU", 4);
405 /* same as model number in STORE CPU ID */
406 ebcdic_put(sysib.model, "QEMU ", 16);
407 ebcdic_put(sysib.sequence, "QEMU ", 16);
408 ebcdic_put(sysib.plant, "QEMU", 4);
409 cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
410 } else if ((sel1 == 2) && (sel2 == 1)) {
411 /* Basic Machine CPU */
412 struct sysib_121 sysib;
414 memset(&sysib, 0, sizeof(sysib));
415 /* XXX make different for different CPUs? */
416 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
417 ebcdic_put(sysib.plant, "QEMU", 4);
418 stw_p(&sysib.cpu_addr, env->cpu_num);
419 cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
420 } else if ((sel1 == 2) && (sel2 == 2)) {
421 /* Basic Machine CPUs */
422 struct sysib_122 sysib;
424 memset(&sysib, 0, sizeof(sysib));
425 stl_p(&sysib.capability, 0x443afc29);
426 /* XXX change when SMP comes */
427 stw_p(&sysib.total_cpus, 1);
428 stw_p(&sysib.active_cpus, 1);
429 stw_p(&sysib.standby_cpus, 0);
430 stw_p(&sysib.reserved_cpus, 0);
431 cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
432 } else {
433 cc = 3;
435 break;
436 case STSI_LEVEL_2:
438 if ((sel1 == 2) && (sel2 == 1)) {
439 /* LPAR CPU */
440 struct sysib_221 sysib;
442 memset(&sysib, 0, sizeof(sysib));
443 /* XXX make different for different CPUs? */
444 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
445 ebcdic_put(sysib.plant, "QEMU", 4);
446 stw_p(&sysib.cpu_addr, env->cpu_num);
447 stw_p(&sysib.cpu_id, 0);
448 cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
449 } else if ((sel1 == 2) && (sel2 == 2)) {
450 /* LPAR CPUs */
451 struct sysib_222 sysib;
453 memset(&sysib, 0, sizeof(sysib));
454 stw_p(&sysib.lpar_num, 0);
455 sysib.lcpuc = 0;
456 /* XXX change when SMP comes */
457 stw_p(&sysib.total_cpus, 1);
458 stw_p(&sysib.conf_cpus, 1);
459 stw_p(&sysib.standby_cpus, 0);
460 stw_p(&sysib.reserved_cpus, 0);
461 ebcdic_put(sysib.name, "QEMU ", 8);
462 stl_p(&sysib.caf, 1000);
463 stw_p(&sysib.dedicated_cpus, 0);
464 stw_p(&sysib.shared_cpus, 0);
465 cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
466 } else {
467 cc = 3;
469 break;
471 case STSI_LEVEL_3:
473 if ((sel1 == 2) && (sel2 == 2)) {
474 /* VM CPUs */
475 struct sysib_322 sysib;
477 memset(&sysib, 0, sizeof(sysib));
478 sysib.count = 1;
479 /* XXX change when SMP comes */
480 stw_p(&sysib.vm[0].total_cpus, 1);
481 stw_p(&sysib.vm[0].conf_cpus, 1);
482 stw_p(&sysib.vm[0].standby_cpus, 0);
483 stw_p(&sysib.vm[0].reserved_cpus, 0);
484 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
485 stl_p(&sysib.vm[0].caf, 1000);
486 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
487 cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
488 } else {
489 cc = 3;
491 break;
493 case STSI_LEVEL_CURRENT:
/* Function code 0: report the current (highest) level in r0. */
494 env->regs[0] = STSI_LEVEL_3;
495 break;
496 default:
497 cc = 3;
498 break;
501 return cc;
/* SIGNAL PROCESSOR (partial TCG implementation).  Only a handful of
   orders are handled; SMP targets are not supported here yet.  Returns
   the SIGP condition code. */
504 uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
505 uint64_t cpu_addr)
507 int cc = SIGP_CC_ORDER_CODE_ACCEPTED;
509 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
510 __func__, order_code, r1, cpu_addr);
512 /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
513 as parameter (input). Status (output) is always R1. */
515 switch (order_code & SIGP_ORDER_MASK) {
516 case SIGP_SET_ARCH:
517 /* switch arch */
518 break;
519 case SIGP_SENSE:
520 /* enumerate CPU status */
521 if (cpu_addr) {
522 /* XXX implement when SMP comes */
523 return 3;
/* Clear the low status word in r1 and report "status stored". */
525 env->regs[r1] &= 0xffffffff00000000ULL;
526 cc = 1;
527 break;
528 #if !defined(CONFIG_USER_ONLY)
529 case SIGP_RESTART:
530 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
531 cpu_loop_exit(CPU(s390_env_get_cpu(env)));
532 break;
533 case SIGP_STOP:
/* On a uniprocessor, stopping the only cpu shuts the guest down. */
534 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
535 cpu_loop_exit(CPU(s390_env_get_cpu(env)));
536 break;
537 #endif
538 default:
539 /* unknown sigp */
540 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
541 cc = SIGP_CC_NOT_OPERATIONAL;
544 return cc;
546 #endif
548 #ifndef CONFIG_USER_ONLY
/* Cancel subchannel: forward to the css layer under the BQL. */
549 void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
551 S390CPU *cpu = s390_env_get_cpu(env);
552 qemu_mutex_lock_iothread();
553 ioinst_handle_xsch(cpu, r1);
554 qemu_mutex_unlock_iothread();
/* Clear subchannel: forward to the css layer under the BQL. */
557 void HELPER(csch)(CPUS390XState *env, uint64_t r1)
559 S390CPU *cpu = s390_env_get_cpu(env);
560 qemu_mutex_lock_iothread();
561 ioinst_handle_csch(cpu, r1);
562 qemu_mutex_unlock_iothread();
/* Halt subchannel: forward to the css layer under the BQL. */
565 void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
567 S390CPU *cpu = s390_env_get_cpu(env);
568 qemu_mutex_lock_iothread();
569 ioinst_handle_hsch(cpu, r1);
570 qemu_mutex_unlock_iothread();
/* Modify subchannel.  inst >> 16 passes the opcode halfword on so the
   css layer can decode the instruction format. */
573 void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
575 S390CPU *cpu = s390_env_get_cpu(env);
576 qemu_mutex_lock_iothread();
577 ioinst_handle_msch(cpu, r1, inst >> 16);
578 qemu_mutex_unlock_iothread();
/* Reset channel path: forward to the css layer under the BQL. */
581 void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
583 S390CPU *cpu = s390_env_get_cpu(env);
584 qemu_mutex_lock_iothread();
585 ioinst_handle_rchp(cpu, r1);
586 qemu_mutex_unlock_iothread();
/* Resume subchannel: forward to the css layer under the BQL. */
591 void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
591 S390CPU *cpu = s390_env_get_cpu(env);
592 qemu_mutex_lock_iothread();
593 ioinst_handle_rsch(cpu, r1);
594 qemu_mutex_unlock_iothread();
/* Start subchannel (opcode halfword passed via inst >> 16). */
599 void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
599 S390CPU *cpu = s390_env_get_cpu(env);
600 qemu_mutex_lock_iothread();
601 ioinst_handle_ssch(cpu, r1, inst >> 16);
602 qemu_mutex_unlock_iothread();
/* Store subchannel (opcode halfword passed via inst >> 16). */
607 void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
607 S390CPU *cpu = s390_env_get_cpu(env);
608 qemu_mutex_lock_iothread();
609 ioinst_handle_stsch(cpu, r1, inst >> 16);
610 qemu_mutex_unlock_iothread();
/* Test subchannel (opcode halfword passed via inst >> 16). */
615 void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
615 S390CPU *cpu = s390_env_get_cpu(env);
616 qemu_mutex_lock_iothread();
617 ioinst_handle_tsch(cpu, r1, inst >> 16);
618 qemu_mutex_unlock_iothread();
/* Channel subsystem call (opcode halfword passed via inst >> 16). */
623 void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
623 S390CPU *cpu = s390_env_get_cpu(env);
624 qemu_mutex_lock_iothread();
625 ioinst_handle_chsc(cpu, inst >> 16);
626 qemu_mutex_unlock_iothread();
628 #endif
630 #ifndef CONFIG_USER_ONLY
/* Raise a pending PER event (if any) as a program interrupt at the end
   of instruction execution. */
631 void HELPER(per_check_exception)(CPUS390XState *env)
633 CPUState *cs = CPU(s390_env_get_cpu(env));
635 if (env->per_perc_atmid) {
636 env->int_pgm_code = PGM_PER;
/* Derive the ilen from the opcode of the instruction that triggered
   the event. */
637 env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, env->per_address));
639 cs->exception_index = EXCP_PGM;
640 cpu_loop_exit(cs);
/* Record a PER successful-branch event when branch tracing is enabled
   (optionally restricted to branches whose target is in the PER range). */
644 void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
646 if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
647 if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
648 || get_per_in_range(env, to)) {
649 env->per_address = from;
650 env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
/* Record a PER instruction-fetch event for addr if it lies in the PER
   range; with nullification enabled the exception is raised immediately
   (before the instruction executes). */
655 void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
657 if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
658 env->per_address = addr;
659 env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
661 /* If the instruction has to be nullified, trigger the
662 exception immediately. */
663 if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
664 CPUState *cs = CPU(s390_env_get_cpu(env));
666 env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
667 env->int_pgm_code = PGM_PER;
668 env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));
670 cs->exception_index = EXCP_PGM;
671 cpu_loop_exit(cs);
677 /* The maximum bit defined at the moment is 129. */
678 #define MAX_STFL_WORDS 3
680 /* Canonicalize the current cpu's features into the 64-bit words required
681 by STFLE. Return the index-1 of the max word that is non-zero. */
682 static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS])
684 S390CPU *cpu = s390_env_get_cpu(env);
685 const unsigned long *features = cpu->model->features;
686 unsigned max_bit = 0;
687 S390Feat feat;
689 memset(words, 0, sizeof(uint64_t) * MAX_STFL_WORDS);
691 if (test_bit(S390_FEAT_ZARCH, features)) {
692 /* z/Architecture is always active if around */
693 words[0] = 1ull << (63 - 2);
/* Set one bit per STFL-type feature; bits are numbered MSB-first
   within each 64-bit word, per the facility-list layout. */
696 for (feat = find_first_bit(features, S390_FEAT_MAX);
697 feat < S390_FEAT_MAX;
698 feat = find_next_bit(features, S390_FEAT_MAX, feat + 1)) {
699 const S390FeatDef *def = s390_feat_def(feat);
700 if (def->type == S390_FEAT_TYPE_STFL) {
701 unsigned bit = def->bit;
702 if (bit > max_bit) {
703 max_bit = bit;
705 assert(bit / 64 < MAX_STFL_WORDS);
706 words[bit / 64] |= 1ULL << (63 - bit % 64);
/* Index of the highest non-zero doubleword (i.e. count - 1). */
710 return max_bit / 64;
713 void HELPER(stfl)(CPUS390XState *env)
715 uint64_t words[MAX_STFL_WORDS];
717 do_stfle(env, words);
/* STFL stores only the first 32 facility bits at fixed address 200. */
718 cpu_stl_data(env, 200, words[0] >> 32);
721 uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
723 uint64_t words[MAX_STFL_WORDS];
724 unsigned count_m1 = env->regs[0] & 0xff;
725 unsigned max_m1 = do_stfle(env, words);
726 unsigned i;
728 for (i = 0; i <= count_m1; ++i) {
729 cpu_stq_data(env, addr + 8 * i, words[i]);
732 env->regs[0] = deposit64(env->regs[0], 0, 8, max_m1);
733 return (count_m1 >= max_m1 ? 0 : 3);