Merge remote-tracking branch 'remotes/amarkovic/tags/mips-queue-jun-26-2019' into...
[qemu/ar7.git] / target / s390x / cc_helper.c
blobcf687927336cd2d9239ca5fde2399ab3200f47e5
1 /*
2 * S/390 condition code helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/exec-all.h"
25 #include "exec/helper-proto.h"
26 #include "qemu/host-utils.h"
/* Uncomment to route HELPER_LOG through qemu_log for debugging. */
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
/* Compiled out by default: logging calls vanish entirely. */
#define HELPER_LOG(x...)
#endif
/* Signed 32-bit compare: CC 0 = equal, CC 1 = src low, CC 2 = src high. */
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src < dst) {
        return 1;
    }
    return src == dst ? 0 : 2;
}
/* Signed 32-bit compare against zero: CC 0 = zero, 1 = negative, 2 = positive. */
static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
/* Signed 64-bit compare: CC 0 = equal, CC 1 = src low, CC 2 = src high. */
static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src < dst) {
        return 1;
    }
    return src == dst ? 0 : 2;
}
/* Signed 64-bit compare against zero: CC 0 = zero, 1 = negative, 2 = positive. */
static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
/* Unsigned 32-bit compare: CC 0 = equal, CC 1 = src low, CC 2 = src high. */
static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src != dst) {
        return src < dst ? 1 : 2;
    }
    return 0;
}
/* Unsigned 64-bit compare: CC 0 = equal, CC 1 = src low, CC 2 = src high. */
static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src != dst) {
        return src < dst ? 1 : 2;
    }
    return 0;
}
/* TEST UNDER MASK (32-bit): CC 0 = all selected bits zero,
 * CC 3 = all selected bits one, CC 1 = mixed. */
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    const uint32_t selected = val & mask;

    if (selected == 0) {
        return 0;
    }
    return selected == mask ? 3 : 1;
}
/* TEST UNDER MASK (64-bit): CC 0 = all selected bits zero, CC 3 = all one.
 * For a mixed result, CC 2 if the leftmost selected bit of val is one,
 * otherwise CC 1. */
static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    const uint64_t selected = val & mask;

    if (selected == 0) {
        return 0;
    }
    if (selected == mask) {
        return 3;
    }
    /* Align the leftmost mask bit with the sign position and test it. */
    return (int64_t)(val << clz64(mask)) < 0 ? 2 : 1;
}
/* CC 1 if the result is non-zero, else CC 0. */
static uint32_t cc_calc_nz(uint64_t dst)
{
    return dst != 0;
}
/* Signed 64-bit add: CC 3 on overflow, else CC by sign of the result
 * (0 = zero, 1 = negative, 2 = positive). */
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    /* Overflow iff both addends share a sign that the result lacks. */
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
/* Unsigned 64-bit add: bit 0 of CC = result non-zero, bit 1 = carry out
 * (result wrapped below the first operand). */
static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    uint32_t cc = (ar != 0);

    if (ar < a1) {
        cc += 2;
    }
    return cc;
}
/* Unsigned 64-bit add-with-carry: bit 0 of CC = result non-zero,
 * bit 1 = carry out. */
static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* Recover a2 + carry_in from the result. */
    uint64_t a2_plus_cin = ar - a1;
    /* Carry out iff a2 + carry_in wrapped, or a1 + (a2 + carry_in) wrapped. */
    int carry = (a2_plus_cin < a2) || (ar < a1);

    if (carry) {
        return ar ? 3 : 2;
    }
    return ar ? 1 : 0;
}
/* Signed 64-bit subtract: CC 3 on overflow, else CC by sign of the result
 * (0 = zero, 1 = negative, 2 = positive). */
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    /* Overflow iff operands have opposite signs and the result follows a2. */
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
/* Unsigned 64-bit subtract: CC 2 = zero result, CC 1 = borrow (a2 > a1),
 * CC 3 = non-zero result without borrow. */
static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    if (ar == 0) {
        return 2;
    }
    return a2 > a1 ? 1 : 3;
}
/* Unsigned 64-bit subtract-with-borrow: bit 0 of CC = result non-zero,
 * bit 1 = NO borrow out. */
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* A result differing from a1 - a2 means a borrow came in. */
    int borrow_out = (ar != a1 - a2) ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + (borrow_out ? 0 : 2);
}
/* LOAD POSITIVE (64-bit): CC 3 = overflow (|INT64_MIN| not representable),
 * CC 2 = positive result, CC 0 = zero. */
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 1ULL << 63) {
        return 3;
    }
    return dst != 0 ? 2 : 0;
}
/* LOAD NEGATIVE (64-bit): CC 1 = non-zero (negative) result, CC 0 = zero. */
static uint32_t cc_calc_nabs_64(int64_t dst)
{
    return dst != 0;
}
/* LOAD COMPLEMENT (64-bit): CC 3 = overflow (-INT64_MIN), otherwise CC by
 * sign of the result (0 = zero, 1 = negative, 2 = positive). */
static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 1ULL << 63) {
        return 3;
    }
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
/* Signed 32-bit add: CC 3 on overflow, else CC by sign of the result
 * (0 = zero, 1 = negative, 2 = positive). */
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    /* Overflow iff both addends share a sign that the result lacks. */
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
/* Unsigned 32-bit add: bit 0 of CC = result non-zero, bit 1 = carry out
 * (result wrapped below the first operand). */
static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    uint32_t cc = (ar != 0);

    if (ar < a1) {
        cc += 2;
    }
    return cc;
}
/* Unsigned 32-bit add-with-carry: bit 0 of CC = result non-zero,
 * bit 1 = carry out. */
static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* Recover a2 + carry_in from the result. */
    uint32_t a2_plus_cin = ar - a1;
    /* Carry out iff a2 + carry_in wrapped, or a1 + (a2 + carry_in) wrapped. */
    int carry = (a2_plus_cin < a2) || (ar < a1);

    if (carry) {
        return ar ? 3 : 2;
    }
    return ar ? 1 : 0;
}
/* Signed 32-bit subtract: CC 3 on overflow, else CC by sign of the result
 * (0 = zero, 1 = negative, 2 = positive). */
static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    /* Overflow iff operands have opposite signs and the result follows a2. */
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return ar < 0 ? 1 : 2;
}
/* Unsigned 32-bit subtract: CC 2 = zero result, CC 1 = borrow (a2 > a1),
 * CC 3 = non-zero result without borrow. */
static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    if (ar == 0) {
        return 2;
    }
    return a2 > a1 ? 1 : 3;
}
/* Unsigned 32-bit subtract-with-borrow: bit 0 of CC = result non-zero,
 * bit 1 = NO borrow out. */
static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* A result differing from a1 - a2 means a borrow came in. */
    int borrow_out = (ar != a1 - a2) ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + (borrow_out ? 0 : 2);
}
/* LOAD POSITIVE (32-bit): CC 3 = overflow (|INT32_MIN| not representable),
 * CC 2 = positive result, CC 0 = zero. */
static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 1U << 31) {
        return 3;
    }
    return dst != 0 ? 2 : 0;
}
/* LOAD NEGATIVE (32-bit): CC 1 = non-zero (negative) result, CC 0 = zero. */
static uint32_t cc_calc_nabs_32(int32_t dst)
{
    return dst != 0;
}
/* LOAD COMPLEMENT (32-bit): CC 3 = overflow (-INT32_MIN), otherwise CC by
 * sign of the result (0 = zero, 1 = negative, 2 = positive). */
static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 1U << 31) {
        return 3;
    }
    if (dst == 0) {
        return 0;
    }
    return dst < 0 ? 1 : 2;
}
/* Calculate condition code for INSERT CHARACTERS UNDER MASK: CC 0 when all
 * inserted bits are zero; otherwise CC 1 if the leftmost inserted bit is
 * one, else CC 2. */
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    if ((val & mask) == 0) {
        return 0;
    }
    /* Align the leftmost mask bit with the sign position and test it. */
    return (int64_t)(val << clz64(mask)) < 0 ? 1 : 2;
}
/*
 * SHIFT LEFT SINGLE (32-bit): the sign bit is kept, the 31 numeric bits
 * are shifted left.  CC 3 = overflow, otherwise CC by sign of the result
 * (0 = zero, 1 = negative, 2 = positive).
 *
 * Overflow occurs iff any bit shifted out of the numeric part differs
 * from the sign.  The bits that must therefore equal the sign are
 * 31..(31-shift), i.e. mask = -1U << (31 - shift).  The previous form,
 * ((1U << shift) - 1U) << (32 - shift), invoked undefined behavior for
 * shift == 0 (a 32-bit shift) and missed the overflow at bit (31-shift),
 * e.g. 0x40000000 << 1 reported CC 0 instead of CC 3.
 *
 * NOTE(review): assumes 0 <= shift <= 31 -- confirm against translate.c.
 */
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    uint32_t mask = -1U << (31 - shift);  /* sign bit + `shift` bits below */
    uint32_t sign = 1U << 31;
    uint32_t match;
    int32_t r;

    /* All checked bits must equal the sign bit. */
    match = (src & sign) ? mask : 0;
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    /* Shifted numeric bits with the original sign re-inserted. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    }
    return r < 0 ? 1 : 2;
}
/*
 * SHIFT LEFT SINGLE (64-bit): the sign bit is kept, the 63 numeric bits
 * are shifted left.  CC 3 = overflow, otherwise CC by sign of the result
 * (0 = zero, 1 = negative, 2 = positive).
 *
 * Overflow occurs iff any bit shifted out of the numeric part differs
 * from the sign.  The bits that must therefore equal the sign are
 * 63..(63-shift), i.e. mask = -1ULL << (63 - shift).  The previous form,
 * ((1ULL << shift) - 1ULL) << (64 - shift), invoked undefined behavior
 * for shift == 0 (a 64-bit shift) and missed the overflow at bit
 * (63-shift), e.g. (1ULL << 62) << 1 reported CC 0 instead of CC 3.
 */
static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    uint64_t mask = -1ULL << (63 - shift); /* sign bit + `shift` bits below */
    uint64_t sign = 1ULL << 63;
    uint64_t match;
    int64_t r;

    /* All checked bits must equal the sign bit. */
    match = (src & sign) ? mask : 0;
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    /* Shifted numeric bits with the original sign re-inserted. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    }
    return r < 0 ? 1 : 2;
}
/* FIND LEFTMOST ONE: CC 2 when any bits remain in the operand, CC 0 when
 * the operand was entirely scanned (dst == 0). */
static uint32_t cc_calc_flogr(uint64_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return 2;
}
/* LOAD COUNT TO BLOCK BOUNDARY: CC 0 for a full 16-byte count, CC 3
 * otherwise (block boundary reached early). */
static uint32_t cc_calc_lcbb(uint64_t dst)
{
    if (dst == 16) {
        return 0;
    }
    return 3;
}
/* Vector compare: low/high are the two 64-bit halves of the comparison
 * result bitmap.  CC 0 = all elements match, CC 3 = none match,
 * CC 1 = some but not all match. */
static uint32_t cc_calc_vc(uint64_t low, uint64_t high)
{
    if ((high & low) == -1ull) {
        /* all elements match */
        return 0;
    }
    if ((high | low) == 0) {
        /* no elements match */
        return 3;
    }
    /* some elements but not all match */
    return 1;
}
419 static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
420 uint64_t src, uint64_t dst, uint64_t vr)
422 uint32_t r = 0;
424 switch (cc_op) {
425 case CC_OP_CONST0:
426 case CC_OP_CONST1:
427 case CC_OP_CONST2:
428 case CC_OP_CONST3:
429 /* cc_op value _is_ cc */
430 r = cc_op;
431 break;
432 case CC_OP_LTGT0_32:
433 r = cc_calc_ltgt0_32(dst);
434 break;
435 case CC_OP_LTGT0_64:
436 r = cc_calc_ltgt0_64(dst);
437 break;
438 case CC_OP_LTGT_32:
439 r = cc_calc_ltgt_32(src, dst);
440 break;
441 case CC_OP_LTGT_64:
442 r = cc_calc_ltgt_64(src, dst);
443 break;
444 case CC_OP_LTUGTU_32:
445 r = cc_calc_ltugtu_32(src, dst);
446 break;
447 case CC_OP_LTUGTU_64:
448 r = cc_calc_ltugtu_64(src, dst);
449 break;
450 case CC_OP_TM_32:
451 r = cc_calc_tm_32(src, dst);
452 break;
453 case CC_OP_TM_64:
454 r = cc_calc_tm_64(src, dst);
455 break;
456 case CC_OP_NZ:
457 r = cc_calc_nz(dst);
458 break;
459 case CC_OP_ADD_64:
460 r = cc_calc_add_64(src, dst, vr);
461 break;
462 case CC_OP_ADDU_64:
463 r = cc_calc_addu_64(src, dst, vr);
464 break;
465 case CC_OP_ADDC_64:
466 r = cc_calc_addc_64(src, dst, vr);
467 break;
468 case CC_OP_SUB_64:
469 r = cc_calc_sub_64(src, dst, vr);
470 break;
471 case CC_OP_SUBU_64:
472 r = cc_calc_subu_64(src, dst, vr);
473 break;
474 case CC_OP_SUBB_64:
475 r = cc_calc_subb_64(src, dst, vr);
476 break;
477 case CC_OP_ABS_64:
478 r = cc_calc_abs_64(dst);
479 break;
480 case CC_OP_NABS_64:
481 r = cc_calc_nabs_64(dst);
482 break;
483 case CC_OP_COMP_64:
484 r = cc_calc_comp_64(dst);
485 break;
487 case CC_OP_ADD_32:
488 r = cc_calc_add_32(src, dst, vr);
489 break;
490 case CC_OP_ADDU_32:
491 r = cc_calc_addu_32(src, dst, vr);
492 break;
493 case CC_OP_ADDC_32:
494 r = cc_calc_addc_32(src, dst, vr);
495 break;
496 case CC_OP_SUB_32:
497 r = cc_calc_sub_32(src, dst, vr);
498 break;
499 case CC_OP_SUBU_32:
500 r = cc_calc_subu_32(src, dst, vr);
501 break;
502 case CC_OP_SUBB_32:
503 r = cc_calc_subb_32(src, dst, vr);
504 break;
505 case CC_OP_ABS_32:
506 r = cc_calc_abs_32(dst);
507 break;
508 case CC_OP_NABS_32:
509 r = cc_calc_nabs_32(dst);
510 break;
511 case CC_OP_COMP_32:
512 r = cc_calc_comp_32(dst);
513 break;
515 case CC_OP_ICM:
516 r = cc_calc_icm(src, dst);
517 break;
518 case CC_OP_SLA_32:
519 r = cc_calc_sla_32(src, dst);
520 break;
521 case CC_OP_SLA_64:
522 r = cc_calc_sla_64(src, dst);
523 break;
524 case CC_OP_FLOGR:
525 r = cc_calc_flogr(dst);
526 break;
527 case CC_OP_LCBB:
528 r = cc_calc_lcbb(dst);
529 break;
530 case CC_OP_VC:
531 r = cc_calc_vc(src, dst);
532 break;
534 case CC_OP_NZ_F32:
535 r = set_cc_nz_f32(dst);
536 break;
537 case CC_OP_NZ_F64:
538 r = set_cc_nz_f64(dst);
539 break;
540 case CC_OP_NZ_F128:
541 r = set_cc_nz_f128(make_float128(src, dst));
542 break;
544 default:
545 cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op));
548 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
549 cc_name(cc_op), src, dst, vr, r);
550 return r;
553 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
554 uint64_t vr)
556 return do_calc_cc(env, cc_op, src, dst, vr);
559 uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
560 uint64_t dst, uint64_t vr)
562 return do_calc_cc(env, cc_op, src, dst, vr);
565 #ifndef CONFIG_USER_ONLY
/* Install a new PSW (mask + address), then exit the current translation
 * block: a PSW change can alter control state that invalidates the
 * currently executing TB. */
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(env_cpu(env));
}
/* SET ADDRESS SPACE CONTROL: select the address-space-control (ASC) mode
 * in the PSW from the operand address. */
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);

    /* Mode is encoded in bits 0xf00 of the effective address. */
    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        /* NOTE(review): 0x200 (access-register mode) is not handled here and
         * raises a specification exception -- confirm this is intentional
         * (i.e. AR mode unsupported) rather than an omission. */
        HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
        s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
        break;
    }
}
595 #endif