/*
 * S/390 condition code helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg_s390x.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
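
/*
 * The helpers below turn a deferred condition-code computation into the
 * architectural two-bit condition code (0-3).  The translator records the
 * pending operation in cc_op and its operands in cc_src, cc_dst and cc_vr,
 * and only calls into this file when the CC value is actually needed.
 * For the signed and unsigned compares the encoding is the architectural
 * one: 0 = operands equal, 1 = first operand low, 2 = first operand high.
 */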

static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    return cc_calc_ltgt_32(dst, 0);
}

static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    return cc_calc_ltgt_64(dst, 0);
}

static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

/* TEST UNDER MASK: 0 = selected bits all zero, 1 = mixed, 3 = all ones */
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}

static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        /* Mixed: CC 2 if the leftmost selected bit is one, else CC 1. */
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 2;
        } else {
            return 1;
        }
    }
}

static uint32_t cc_calc_nz(uint64_t dst)
{
    return !!dst;
}
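
/*
 * For the unsigned (logical) add, the architecture encodes the CC as
 * "result is non-zero" in bit 0 and "carry out" in bit 1:
 * 0 = zero, no carry; 1 = not zero, no carry; 2 = zero, carry;
 * 3 = not zero, carry.
 */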
static uint32_t cc_calc_addu(uint64_t carry_out, uint64_t result)
{
    g_assert(carry_out <= 1);
    return (result != 0) + 2 * carry_out;
}
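
/*
 * For the unsigned subtraction the borrow-out arrives as 0 (no borrow) or
 * -1 (borrow); adding 1 converts it into the carry-out form expected by
 * cc_calc_addu, since architecturally a borrow is the absence of a carry.
 */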
static uint32_t cc_calc_subu(uint64_t borrow_out, uint64_t result)
{
    return cc_calc_addu(borrow_out + 1, result);
}

static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar < 0) {
        return 1;
    } else if (ar > 0) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar < 0) {
        return 1;
    } else if (ar > 0) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    int borrow_out;

    if (ar != a1 - a2) { /* difference means borrow-in */
        borrow_out = (a2 >= a1);
    } else {
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}

/* LOAD POSITIVE: CC 0 = zero, 2 = positive, 3 = overflow. */
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3; /* overflow */
    } else if (dst) {
        return 2;
    } else {
        return 0;
    }
}

/* LOAD NEGATIVE: the result is never positive, so only zero vs non-zero matters. */
static uint32_t cc_calc_nabs_64(int64_t dst)
{
    return !!dst;
}

static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3; /* overflow */
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar < 0) {
        return 1;
    } else if (ar > 0) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else if (ar < 0) {
        return 1;
    } else if (ar > 0) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    int borrow_out;

    if (ar != a1 - a2) { /* difference means borrow-in */
        borrow_out = (a2 >= a1);
    } else {
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}

static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3; /* overflow */
    } else if (dst) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_nabs_32(int32_t dst)
{
    return !!dst;
}

static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3; /* overflow */
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

/* calculate condition code for insert character under mask insn */
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    if ((val & mask) == 0) {
        return 0;
    } else {
        /* CC 1 if the leftmost inserted bit is one, CC 2 otherwise. */
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 1;
        } else {
            return 2;
        }
    }
}
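
/*
 * SHIFT LEFT SINGLE (arithmetic): overflow (CC 3) is indicated when any
 * bit shifted out of the magnitude differs from the sign bit, which is
 * what the mask/match comparison below checks; otherwise the CC reflects
 * the sign of the result, with the original sign bit preserved.
 */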
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
    uint32_t sign = 1U << 31;
    uint32_t match;
    int32_t r;

    /* Check if the sign bit stays the same. */
    if (src & sign) {
        match = mask;
    } else {
        match = 0;
    }
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    } else if (r < 0) {
        return 1;
    }
    return 2;
}

static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t sign = 1ULL << 63;
    uint64_t match;
    int64_t r;

    /* Check if the sign bit stays the same. */
    if (src & sign) {
        match = mask;
    } else {
        match = 0;
    }
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    } else if (r < 0) {
        return 1;
    }
    return 2;
}

static uint32_t cc_calc_flogr(uint64_t dst)
{
    return dst ? 2 : 0;
}

/* LOAD COUNT TO BLOCK BOUNDARY: CC 0 when the count is 16, CC 3 otherwise. */
static uint32_t cc_calc_lcbb(uint64_t dst)
{
    return dst == 16 ? 0 : 3;
}

static uint32_t cc_calc_vc(uint64_t low, uint64_t high)
{
    if (high == -1ull && low == -1ull) {
        /* all elements match */
        return 0;
    } else if (high == 0 && low == 0) {
        /* no elements match */
        return 3;
    } else {
        /* some elements but not all match */
        return 1;
    }
}
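
/*
 * For the 32-bit multiply variants that set the condition code, res holds
 * the full 64-bit product: shifting it right by 31 leaves 0 or -1 exactly
 * when the product fits in a signed 32-bit value, so any other value
 * means overflow (CC 3).
 */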
static uint32_t cc_calc_muls_32(int64_t res)
{
    const int64_t tmp = res >> 31;

    if (!res) {
        return 0;
    } else if (tmp && tmp != -1) {
        return 3;
    } else if (res < 0) {
        return 1;
    }
    return 2;
}

static uint64_t cc_calc_muls_64(int64_t res_high, uint64_t res_low)
{
    if (!res_high && !res_low) {
        return 0;
    } else if (res_high + (res_low >> 63) != 0) {
        return 3;
    } else if (res_high < 0) {
        return 1;
    }
    return 2;
}
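
/*
 * Dispatch on the deferred cc_op: src, dst and vr carry the values of
 * cc_src, cc_dst and cc_vr at the point the condition code is needed.
 */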
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
                           uint64_t src, uint64_t dst, uint64_t vr)
{
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(dst);
        break;
    case CC_OP_ADDU:
        r = cc_calc_addu(src, dst);
        break;
    case CC_OP_SUBU:
        r = cc_calc_subu(src, dst);
        break;
    case CC_OP_ADD_64:
        r = cc_calc_add_64(src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(src, dst, vr);
        break;
    case CC_OP_SUBB_64:
        r = cc_calc_subb_64(src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(dst);
        break;
    case CC_OP_MULS_64:
        r = cc_calc_muls_64(src, dst);
        break;

    case CC_OP_ADD_32:
        r = cc_calc_add_32(src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(src, dst, vr);
        break;
    case CC_OP_SUBB_32:
        r = cc_calc_subb_32(src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_32(dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_32(dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(dst);
        break;
    case CC_OP_MULS_32:
        r = cc_calc_muls_32(dst);
        break;

    case CC_OP_ICM:
        r = cc_calc_icm(src, dst);
        break;
    case CC_OP_SLA_32:
        r = cc_calc_sla_32(src, dst);
        break;
    case CC_OP_SLA_64:
        r = cc_calc_sla_64(src, dst);
        break;
    case CC_OP_FLOGR:
        r = cc_calc_flogr(dst);
        break;
    case CC_OP_LCBB:
        r = cc_calc_lcbb(dst);
        break;
    case CC_OP_VC:
        r = cc_calc_vc(src, dst);
        break;

    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    case CC_OP_NZ_F128:
        r = set_cc_nz_f128(make_float128(src, dst));
        break;

    default:
        cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
               cc_name(cc_op), src, dst, vr, r);
    return r;
}
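
/*
 * calc_cc is the entry point used by other parts of the s390x target;
 * the HELPER(calc_cc) wrapper below exposes the same computation to
 * TCG-generated code.
 */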
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
                         uint64_t dst, uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

#ifndef CONFIG_USER_ONLY
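
/*
 * The helpers below modify the PSW and therefore only exist in system
 * emulation; user-only builds never reach them.
 */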

void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    /* Leave the cpu loop so execution resumes with the new PSW. */
    cpu_loop_exit(env_cpu(env));
}
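
/*
 * SET ADDRESS SPACE CONTROL FAST: bits 52-55 of the effective address
 * select the address-space-control mode copied into the PSW mask;
 * unsupported modes raise a specification exception.
 */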
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}
#endif