/* target-s390x/cc_helper.c */
/*
 * S/390 condition code helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"

/* Uncomment DEBUG_HELPER to route HELPER_LOG() through qemu_log(). */
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
/*
 * Signed 32-bit compare.
 * cc 0: equal; cc 1: src low; cc 2: src high.
 */
static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
{
    if (src == dst) {
        return 0;
    }
    return (src < dst) ? 1 : 2;
}
/* Signed compare of dst against zero (helper call inlined). */
static uint32_t cc_calc_ltgt0_32(int32_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return (dst < 0) ? 1 : 2;
}
/*
 * Signed 64-bit compare.
 * cc 0: equal; cc 1: src low; cc 2: src high.
 */
static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
{
    if (src == dst) {
        return 0;
    }
    return (src < dst) ? 1 : 2;
}
/* Signed compare of dst against zero (helper call inlined). */
static uint32_t cc_calc_ltgt0_64(int64_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return (dst < 0) ? 1 : 2;
}
/*
 * Unsigned 32-bit compare.
 * cc 0: equal; cc 1: src low; cc 2: src high.
 */
static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
{
    if (src == dst) {
        return 0;
    }
    return (src < dst) ? 1 : 2;
}
/*
 * Unsigned 64-bit compare.
 * cc 0: equal; cc 1: src low; cc 2: src high.
 */
static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
{
    if (src == dst) {
        return 0;
    }
    return (src < dst) ? 1 : 2;
}
/*
 * Test-under-mask, 32-bit.
 * cc 0: no selected bit set; cc 3: all selected bits set; cc 1: mixed.
 */
static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
{
    uint32_t sel = val & mask;

    if (sel == 0) {
        return 0;
    }
    return (sel == mask) ? 3 : 1;
}
/*
 * Test-under-mask, 64-bit.
 * cc 0: no selected bit set; cc 3: all selected bits set;
 * mixed: cc 2 if the leftmost selected bit is one, else cc 1.
 */
static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
{
    uint64_t sel = val & mask;

    if (sel == 0) {
        return 0;
    }
    if (sel == mask) {
        return 3;
    }
    /* Align the leftmost mask bit with the sign bit and test it. */
    return ((int64_t)(val << clz64(mask)) < 0) ? 2 : 1;
}
/* cc 0 for a zero value, cc 1 otherwise. */
static uint32_t cc_calc_nz(uint64_t dst)
{
    return dst != 0;
}
/*
 * Signed 64-bit add.
 * cc 0: zero; cc 1: negative; cc 2: positive; cc 3: overflow.
 */
static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
{
    /* Same-sign operands with an opposite-sign result => overflow. */
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return (ar < 0) ? 1 : 2;
}
/*
 * Unsigned 64-bit add: cc bit 0 = result non-zero, cc bit 1 = carry out.
 * A carry occurred iff the result wrapped below the first operand
 * (a2 is unused; carry is recovered from a1 and ar alone).
 */
static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    uint32_t cc = (ar != 0);

    if (ar < a1) {
        cc += 2;
    }
    return cc;
}
/*
 * Unsigned 64-bit add with carry-in.
 * cc bit 0 = result non-zero, cc bit 1 = carry out.
 */
static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /* Recover a2 + carry_in from the result. */
    uint64_t a2c = ar - a1;

    /* Carry out if a2 + carry_in wrapped, or if a1 + a2c wrapped. */
    if ((a2c < a2) || (ar < a1)) {
        return 2 + (ar != 0);
    }
    return (ar != 0);
}
/*
 * Signed 64-bit subtract.
 * cc 0: zero; cc 1: negative; cc 2: positive; cc 3: overflow.
 */
static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
{
    /* Opposite-sign operands with a result matching a2's sign => overflow. */
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return (ar < 0) ? 1 : 2;
}
/*
 * Unsigned 64-bit subtract.
 * cc 1: non-zero with borrow; cc 2: zero; cc 3: non-zero, no borrow.
 */
static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    if (ar == 0) {
        return 2;
    }
    return (a2 > a1) ? 1 : 3;
}
/*
 * Unsigned 64-bit subtract with borrow-in.
 * cc bit 0 = result non-zero, cc bit 1 = no borrow out.
 */
static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
{
    /*
     * ar differing from a1 - a2 means a borrow-in was applied, which
     * makes the borrow-out test inclusive (a2 >= a1 instead of a2 > a1).
     */
    int borrow_out = (ar != a1 - a2) ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + 2 * !borrow_out;
}
/*
 * 64-bit absolute value result.
 * cc 3: overflow (|INT64_MIN|); cc 2: non-zero; cc 0: zero.
 */
static uint32_t cc_calc_abs_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        /* Negating INT64_MIN overflows. */
        return 3;
    }
    return dst ? 2 : 0;
}
/* Negative-absolute result: cc 1 when non-zero, cc 0 when zero. */
static uint32_t cc_calc_nabs_64(int64_t dst)
{
    return dst != 0;
}
/*
 * 64-bit two's-complement (negation) result.
 * cc 3: overflow; cc 1: negative; cc 2: positive; cc 0: zero.
 */
static uint32_t cc_calc_comp_64(int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        /* Complementing INT64_MIN overflows. */
        return 3;
    }
    if (dst == 0) {
        return 0;
    }
    return (dst < 0) ? 1 : 2;
}
/*
 * Signed 32-bit add.
 * cc 0: zero; cc 1: negative; cc 2: positive; cc 3: overflow.
 */
static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
{
    /* Same-sign operands with an opposite-sign result => overflow. */
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return (ar < 0) ? 1 : 2;
}
/*
 * Unsigned 32-bit add: cc bit 0 = result non-zero, cc bit 1 = carry out.
 * A carry occurred iff the result wrapped below the first operand
 * (a2 is unused; carry is recovered from a1 and ar alone).
 */
static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    uint32_t cc = (ar != 0);

    if (ar < a1) {
        cc += 2;
    }
    return cc;
}
/*
 * Unsigned 32-bit add with carry-in.
 * cc bit 0 = result non-zero, cc bit 1 = carry out.
 */
static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /* Recover a2 + carry_in from the result. */
    uint32_t a2c = ar - a1;

    /* Carry out if a2 + carry_in wrapped, or if a1 + a2c wrapped. */
    if ((a2c < a2) || (ar < a1)) {
        return 2 + (ar != 0);
    }
    return (ar != 0);
}
/*
 * Signed 32-bit subtract.
 * cc 0: zero; cc 1: negative; cc 2: positive; cc 3: overflow.
 */
static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
{
    /* Opposite-sign operands with a result matching a2's sign => overflow. */
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3;
    }
    if (ar == 0) {
        return 0;
    }
    return (ar < 0) ? 1 : 2;
}
/*
 * Unsigned 32-bit subtract.
 * cc 1: non-zero with borrow; cc 2: zero; cc 3: non-zero, no borrow.
 */
static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    if (ar == 0) {
        return 2;
    }
    return (a2 > a1) ? 1 : 3;
}
/*
 * Unsigned 32-bit subtract with borrow-in.
 * cc bit 0 = result non-zero, cc bit 1 = no borrow out.
 */
static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
{
    /*
     * ar differing from a1 - a2 means a borrow-in was applied, which
     * makes the borrow-out test inclusive (a2 >= a1 instead of a2 > a1).
     */
    int borrow_out = (ar != a1 - a2) ? (a2 >= a1) : (a2 > a1);

    return (ar != 0) + 2 * !borrow_out;
}
/*
 * 32-bit absolute value result.
 * cc 3: overflow (|INT32_MIN|); cc 2: non-zero; cc 0: zero.
 */
static uint32_t cc_calc_abs_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        /* Negating INT32_MIN overflows. */
        return 3;
    }
    return dst ? 2 : 0;
}
/* Negative-absolute result: cc 1 when non-zero, cc 0 when zero. */
static uint32_t cc_calc_nabs_32(int32_t dst)
{
    return dst != 0;
}
/*
 * 32-bit two's-complement (negation) result.
 * cc 3: overflow; cc 1: negative; cc 2: positive; cc 0: zero.
 */
static uint32_t cc_calc_comp_32(int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        /* Complementing INT32_MIN overflows. */
        return 3;
    }
    if (dst == 0) {
        return 0;
    }
    return (dst < 0) ? 1 : 2;
}
/*
 * Condition code for insert-character-under-mask.
 * cc 0: all inserted bits zero; cc 1: leftmost inserted bit one;
 * cc 2: some bit one, leftmost inserted bit zero.
 */
static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
{
    if ((val & mask) == 0) {
        return 0;
    }
    /* Align the leftmost mask bit with the sign bit and test it. */
    return ((int64_t)(val << clz64(mask)) < 0) ? 1 : 2;
}
/*
 * Shift-left-arithmetic, 32-bit.
 * cc 3: overflow (a shifted-out bit differs from the sign);
 * otherwise cc reflects the sign-preserved result (0/1/2).
 */
static uint32_t cc_calc_sla_32(uint32_t src, int shift)
{
    uint32_t sign = 1U << 31;
    /* The bits that will be shifted out of the magnitude. */
    uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
    uint32_t match = (src & sign) ? mask : 0;
    int32_t r;

    /* Overflow iff any shifted-out bit differs from the sign bit. */
    if ((src & mask) != match) {
        return 3;
    }

    /* Shift the magnitude, keep the original sign bit. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r < 0) {
        return 1;
    }
    return (r == 0) ? 0 : 2;
}
/*
 * Shift-left-arithmetic, 64-bit.
 * cc 3: overflow (a shifted-out bit differs from the sign);
 * otherwise cc reflects the sign-preserved result (0/1/2).
 */
static uint32_t cc_calc_sla_64(uint64_t src, int shift)
{
    uint64_t sign = 1ULL << 63;
    /* The bits that will be shifted out of the magnitude. */
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t match = (src & sign) ? mask : 0;
    int64_t r;

    /* Overflow iff any shifted-out bit differs from the sign bit. */
    if ((src & mask) != match) {
        return 3;
    }

    /* Shift the magnitude, keep the original sign bit. */
    r = ((src << shift) & ~sign) | (src & sign);
    if (r < 0) {
        return 1;
    }
    return (r == 0) ? 0 : 2;
}
/* FIND LEFTMOST ONE result: cc 0 when no bit was found, cc 2 otherwise. */
static uint32_t cc_calc_flogr(uint64_t dst)
{
    if (dst == 0) {
        return 0;
    }
    return 2;
}
/*
 * Compute the condition code from a deferred cc operation.
 *
 * cc_op selects the computation; src, dst and vr are the operand values
 * that were captured when the cc state was set by the translator.
 * Aborts the CPU on an unknown cc_op.
 */
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
                           uint64_t src, uint64_t dst, uint64_t vr)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(dst);
        break;
    case CC_OP_ADD_64:
        r = cc_calc_add_64(src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r = cc_calc_addu_64(src, dst, vr);
        break;
    case CC_OP_ADDC_64:
        r = cc_calc_addc_64(src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r = cc_calc_subu_64(src, dst, vr);
        break;
    case CC_OP_SUBB_64:
        r = cc_calc_subb_64(src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(dst);
        break;

    case CC_OP_ADD_32:
        r = cc_calc_add_32(src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r = cc_calc_addu_32(src, dst, vr);
        break;
    case CC_OP_ADDC_32:
        r = cc_calc_addc_32(src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r = cc_calc_subu_32(src, dst, vr);
        break;
    case CC_OP_SUBB_32:
        r = cc_calc_subb_32(src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_32(dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_32(dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(dst);
        break;

    case CC_OP_ICM:
        r = cc_calc_icm(src, dst);
        break;
    case CC_OP_SLA_32:
        r = cc_calc_sla_32(src, dst);
        break;
    case CC_OP_SLA_64:
        r = cc_calc_sla_64(src, dst);
        break;
    case CC_OP_FLOGR:
        r = cc_calc_flogr(dst);
        break;

    /* Float cc helpers live in the FPU helper code. */
    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;
    case CC_OP_NZ_F128:
        r = set_cc_nz_f128(make_float128(src, dst));
        break;

    default:
        cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
               cc_name(cc_op), src, dst, vr, r);
    return r;
}
/* Public entry point for other s390x target code: compute a deferred cc. */
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
/* TCG helper wrapper around do_calc_cc(), called from generated code. */
uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
                         uint64_t dst, uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
#ifndef CONFIG_USER_ONLY
/*
 * Install a new PSW (mask and address), then exit the cpu loop so the
 * new state takes effect immediately.
 */
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
/*
 * sacf (SET ADDRESS SPACE CONTROL FAST): switch the PSW address-space
 * control bits to primary, secondary or home mode according to a1.
 * Unhandled mode values raise a specification program interrupt.
 */
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);

    /* The mode is encoded in bits 0xf00 of the operand address. */
    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
        program_interrupt(env, PGM_SPECIFICATION, 2);
        break;
    }
}
#endif