/*
 *  S/390 condition code helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg_s390x.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
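
/*
 * Signed comparison (COMPARE family): CC 0 when the operands are equal,
 * CC 1 when the first operand is low, CC 2 when it is high.
 */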
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    return cc_calc_ltgt_32(dst, 0);
}

static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    return cc_calc_ltgt_64(dst, 0);
}
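
/* Unsigned (logical) comparison: same CC encoding as the signed compare above. */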
static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}
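
/*
 * TEST UNDER MASK (TM/TMY): CC 0 when the selected bits are all zeros,
 * CC 3 when they are all ones, CC 1 for a mix.
 */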
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}
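
/*
 * TEST UNDER MASK HIGH/LOW (TMHH and friends): as above, but a mixed
 * result additionally distinguishes whether the leftmost selected bit
 * is one (CC 2) or zero (CC 1).
 */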
static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t r = val & mask;

    if (r == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 2;
        } else {
            return 1;
        }
    }
}
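
/* Generic zero/non-zero result (logical operations): CC 0 for zero, CC 1 otherwise. */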
static uint32_t cc_calc_nz(uint64_t dst)
{
    return !!dst;
}
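
/* Signed add: CC 0 = zero, CC 1 = negative, CC 2 = positive, CC 3 = overflow. */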
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}
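
/* Logical (unsigned) add: CC bit 0 = result is non-zero, CC bit 1 = carry out. */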
static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    return (ar != 0) + 2 * (ar < a1);
}

static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* Recover a2 + carry_in. */
    uint64_t a2c = ar - a1;
    /* Check for a2+carry_in overflow, then a1+a2c overflow. */
    int carry_out = (a2c < a2) || (ar < a1);

    return (ar != 0) + 2 * carry_out;
}

static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}
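
/*
 * Logical (unsigned) subtract: CC 1 = non-zero result with borrow,
 * CC 2 = zero result, CC 3 = non-zero result without borrow.
 */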
static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}
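
/* Logical subtract with borrow: CC bit 0 = result is non-zero, CC bit 1 = no borrow out. */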
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    int borrow_out;

    if (ar != a1 - a2) { /* difference means borrow-in */
        borrow_out = (a2 >= a1);
    } else {
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}
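
/* LOAD POSITIVE: CC 0 = zero, CC 2 = positive, CC 3 = overflow (operand was INT64_MIN). */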
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst) {
        return 2;
    } else {
        return 0;
    }
}
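
/* LOAD NEGATIVE: CC 0 = zero, CC 1 = negative. */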
static uint32_t cc_calc_nabs_64(int64_t dst)
{
    return !!dst;
}
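
/* LOAD COMPLEMENT: CC 0 = zero, CC 1 = negative, CC 2 = positive, CC 3 = overflow. */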
static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}
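
/* The 32-bit variants below use the same CC encodings as their 64-bit counterparts. */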
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    return (ar != 0) + 2 * (ar < a1);
}

static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* Recover a2 + carry_in. */
    uint32_t a2c = ar - a1;
    /* Check for a2+carry_in overflow, then a1+a2c overflow. */
    int carry_out = (a2c < a2) || (ar < a1);

    return (ar != 0) + 2 * carry_out;
}

static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    int borrow_out;

    if (ar != a1 - a2) { /* difference means borrow-in */
        borrow_out = (a2 >= a1);
    } else {
        borrow_out = (a2 > a1);
    }

    return (ar != 0) + 2 * !borrow_out;
}

static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst) {
        return 2;
    } else {
        return 0;
    }
}

static uint32_t cc_calc_nabs_32(int32_t dst)
{
    return !!dst;
}

static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

/* calculate condition code for insert character under mask insn */
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    if ((val & mask) == 0) {
        return 0;
    } else {
        int top = clz64(mask);
        if ((int64_t)(val << top) < 0) {
            return 1;
        } else {
            return 2;
        }
    }
}
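
/*
 * SHIFT LEFT SINGLE (arithmetic): CC 3 on overflow, i.e. when any bit
 * shifted out differs from the (unchanged) sign bit; otherwise the CC
 * reflects the sign of the result.
 */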
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
    uint32_t sign = 1U << 31;
    uint32_t match;
    int32_t r;

    /* Check if the sign bit stays the same. */
    if (src & sign) {
        match = mask;
    } else {
        match = 0;
    }
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    } else if (r < 0) {
        return 1;
    }
    return 2;
}

static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t sign = 1ULL << 63;
    uint64_t match;
    int64_t r;

    /* Check if the sign bit stays the same. */
    if (src & sign) {
        match = mask;
    } else {
        match = 0;
    }
    if ((src & mask) != match) {
        /* Overflow. */
        return 3;
    }

    r = ((src << shift) & ~sign) | (src & sign);
    if (r == 0) {
        return 0;
    } else if (r < 0) {
        return 1;
    }
    return 2;
}
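
/* FIND LEFTMOST ONE: a non-zero operand yields CC 2, zero yields CC 0. */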
static uint32_t cc_calc_flogr(uint64_t dst)
{
    return dst ? 2 : 0;
}
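
/* LOAD COUNT TO BLOCK BOUNDARY: CC 0 for a count of 16 bytes, CC 3 for less. */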
static uint32_t cc_calc_lcbb(uint64_t dst)
{
    return dst == 16 ? 0 : 3;
}
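
/*
 * Vector comparison with CC: "low" and "high" are presumably the two
 * 64-bit halves of the per-element comparison mask; CC 0 = all elements
 * matched, CC 1 = some matched, CC 3 = none matched.
 */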
static uint32_t cc_calc_vc(uint64_t low, uint64_t high)
{
    if (high == -1ull && low == -1ull) {
        /* all elements match */
        return 0;
    } else if (high == 0 && low == 0) {
        /* no elements match */
        return 3;
    } else {
        /* some elements but not all match */
        return 1;
    }
}
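
/*
 * Central dispatcher: recompute the condition code for a deferred cc_op
 * from the saved src/dst/vr operands.
 */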
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
                           uint64_t src, uint64_t dst, uint64_t vr)
{
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(dst);
        break;
    case CC_OP_ADD_64:
        r = cc_calc_add_64(src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r = cc_calc_addu_64(src, dst, vr);
        break;
    case CC_OP_ADDC_64:
        r = cc_calc_addc_64(src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r = cc_calc_subu_64(src, dst, vr);
        break;
    case CC_OP_SUBB_64:
        r = cc_calc_subb_64(src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(dst);
        break;

    case CC_OP_ADD_32:
        r = cc_calc_add_32(src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r = cc_calc_addu_32(src, dst, vr);
        break;
    case CC_OP_ADDC_32:
        r = cc_calc_addc_32(src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r = cc_calc_subu_32(src, dst, vr);
        break;
    case CC_OP_SUBB_32:
        r = cc_calc_subb_32(src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_32(dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_32(dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(dst);
        break;

    case CC_OP_ICM:
        r = cc_calc_icm(src, dst);
        break;
    case CC_OP_SLA_32:
        r = cc_calc_sla_32(src, dst);
        break;
    case CC_OP_SLA_64:
        r = cc_calc_sla_64(src, dst);
        break;
    case CC_OP_FLOGR:
        r = cc_calc_flogr(dst);
        break;
    case CC_OP_LCBB:
        r = cc_calc_lcbb(dst);
        break;
    case CC_OP_VC:
        r = cc_calc_vc(src, dst);
        break;

    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    case CC_OP_NZ_F128:
        r = set_cc_nz_f128(make_float128(src, dst));
        break;

    default:
        cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
               cc_name(cc_op), src, dst, vr, r);
    return r;
}

uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
                         uint64_t dst, uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

#ifndef CONFIG_USER_ONLY
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(env_cpu(env));
}
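
/*
 * SET ADDRESS SPACE CONTROL (FAST): the mode is selected by bits of the
 * second-operand address.  Access-register mode (0x200) is not handled
 * here and ends up in the specification-exception path.
 */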
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }
}
#endif