target/ppc: introduce get_dfp{64,128}() helper functions
target/ppc/dfp_helper.c

/*
 *  PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
 *
 *  Copyright (c) 2014 IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"

#define DECNUMDIGITS 34
#include "libdecnumber/decContext.h"
#include "libdecnumber/decNumber.h"
#include "libdecnumber/dpd/decimal32.h"
#include "libdecnumber/dpd/decimal64.h"
#include "libdecnumber/dpd/decimal128.h"

#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

static void get_dfp64(uint64_t *dst, uint64_t *dfp)
{
    dst[0] = dfp[0];
}

static void get_dfp128(uint64_t *dst, uint64_t *dfp)
{
    dst[0] = dfp[HI_IDX];
    dst[1] = dfp[LO_IDX];
}
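
/*
 * All reads of 64-bit and 128-bit DFP source operands below go through
 * these two helpers; get_dfp128() also fixes the high/low word order via
 * HI_IDX/LO_IDX.  Keeping the accesses in one place should make it easier
 * to change how operands are passed to the helpers later on.
 */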

struct PPC_DFP {
    CPUPPCState *env;
    uint64_t t64[2], a64[2], b64[2];
    decNumber t, a, b;
    decContext context;
    uint8_t crbf;
};

static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr)
{
    enum rounding rnd;

    switch ((fpscr & FP_DRN) >> FPSCR_DRN0) {
    case 0:
        rnd = DEC_ROUND_HALF_EVEN;
        break;
    case 1:
        rnd = DEC_ROUND_DOWN;
        break;
    case 2:
        rnd = DEC_ROUND_CEILING;
        break;
    case 3:
        rnd = DEC_ROUND_FLOOR;
        break;
    case 4:
        rnd = DEC_ROUND_HALF_UP;
        break;
    case 5:
        rnd = DEC_ROUND_HALF_DOWN;
        break;
    case 6:
        rnd = DEC_ROUND_UP;
        break;
    case 7:
        rnd = DEC_ROUND_05UP;
        break;
    default:
        g_assert_not_reached();
    }

    decContextSetRounding(context, rnd);
}

static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
                                              struct PPC_DFP *dfp)
{
    enum rounding rnd;
    if (r == 0) {
        switch (rmc & 3) {
        case 0:
            rnd = DEC_ROUND_HALF_EVEN;
            break;
        case 1:
            rnd = DEC_ROUND_DOWN;
            break;
        case 2:
            rnd = DEC_ROUND_HALF_UP;
            break;
        case 3: /* use FPSCR rounding mode */
            return;
        default:
            assert(0); /* cannot get here */
        }
    } else { /* r == 1 */
        switch (rmc & 3) {
        case 0:
            rnd = DEC_ROUND_CEILING;
            break;
        case 1:
            rnd = DEC_ROUND_FLOOR;
            break;
        case 2:
            rnd = DEC_ROUND_UP;
            break;
        case 3:
            rnd = DEC_ROUND_HALF_DOWN;
            break;
        default:
            assert(0); /* cannot get here */
        }
    }
    decContextSetRounding(&dfp->context, rnd);
}

static void dfp_prepare_decimal64(struct PPC_DFP *dfp, uint64_t *a,
                                  uint64_t *b, CPUPPCState *env)
{
    decContextDefault(&dfp->context, DEC_INIT_DECIMAL64);
    dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
    dfp->env = env;

    if (a) {
        get_dfp64(dfp->a64, a);
        decimal64ToNumber((decimal64 *)dfp->a64, &dfp->a);
    } else {
        dfp->a64[0] = 0;
        decNumberZero(&dfp->a);
    }

    if (b) {
        get_dfp64(dfp->b64, b);
        decimal64ToNumber((decimal64 *)dfp->b64, &dfp->b);
    } else {
        dfp->b64[0] = 0;
        decNumberZero(&dfp->b);
    }
}

static void dfp_prepare_decimal128(struct PPC_DFP *dfp, uint64_t *a,
                                   uint64_t *b, CPUPPCState *env)
{
    decContextDefault(&dfp->context, DEC_INIT_DECIMAL128);
    dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
    dfp->env = env;

    if (a) {
        get_dfp128(dfp->a64, a);
        decimal128ToNumber((decimal128 *)dfp->a64, &dfp->a);
    } else {
        dfp->a64[0] = dfp->a64[1] = 0;
        decNumberZero(&dfp->a);
    }

    if (b) {
        get_dfp128(dfp->b64, b);
        decimal128ToNumber((decimal128 *)dfp->b64, &dfp->b);
    } else {
        dfp->b64[0] = dfp->b64[1] = 0;
        decNumberZero(&dfp->b);
    }
}
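
/*
 * dfp_set_FPSCR_flag() raises the given status flag(s) together with FP_FX
 * (the exception summary bit).  FP_FEX, the enabled exception summary, is
 * only set when the matching enable mask passed in "enabled" is already
 * set in the FPSCR.
 */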

static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag,
                               uint64_t enabled)
{
    dfp->env->fpscr |= (flag | FP_FX);
    if (dfp->env->fpscr & enabled) {
        dfp->env->fpscr |= FP_FEX;
    }
}

static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp,
                                               decContext *context)
{
    uint64_t fprf = 0;

    /* construct FPRF */
    switch (decNumberClass(&dfp->t, context)) {
    case DEC_CLASS_SNAN:
        fprf = 0x01;
        break;
    case DEC_CLASS_QNAN:
        fprf = 0x11;
        break;
    case DEC_CLASS_NEG_INF:
        fprf = 0x09;
        break;
    case DEC_CLASS_NEG_NORMAL:
        fprf = 0x08;
        break;
    case DEC_CLASS_NEG_SUBNORMAL:
        fprf = 0x18;
        break;
    case DEC_CLASS_NEG_ZERO:
        fprf = 0x12;
        break;
    case DEC_CLASS_POS_ZERO:
        fprf = 0x02;
        break;
    case DEC_CLASS_POS_SUBNORMAL:
        fprf = 0x14;
        break;
    case DEC_CLASS_POS_NORMAL:
        fprf = 0x04;
        break;
    case DEC_CLASS_POS_INF:
        fprf = 0x05;
        break;
    default:
        assert(0); /* should never get here */
    }
    dfp->env->fpscr &= ~FP_FPRF;
    dfp->env->fpscr |= (fprf << FPSCR_FPRF);
}

static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context);
}

static void dfp_set_FPRF_from_FRT_short(struct PPC_DFP *dfp)
{
    decContext shortContext;
    decContextDefault(&shortContext, DEC_INIT_DECIMAL32);
    dfp_set_FPRF_from_FRT_with_context(dfp, &shortContext);
}

static void dfp_set_FPRF_from_FRT_long(struct PPC_DFP *dfp)
{
    decContext longContext;
    decContextDefault(&longContext, DEC_INIT_DECIMAL64);
    dfp_set_FPRF_from_FRT_with_context(dfp, &longContext);
}

static void dfp_check_for_OX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Overflow) {
        dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE);
    }
}

static void dfp_check_for_UX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Underflow) {
        dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE);
    }
}

static void dfp_check_for_XX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Inexact) {
        dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE);
    }
}

static void dfp_check_for_ZX(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Division_by_zero) {
        dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE);
    }
}

static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
        }
    }
}

static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp)
{
    if (decNumberIsSNaN(&dfp->t)) {
        dfp->t.bits &= ~DECSNAN;
        dfp->t.bits |= DECNAN;
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
    }
}

static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
            int same = decNumberClass(&dfp->a, &dfp->context) ==
                       decNumberClass(&dfp->b, &dfp->context);
            if ((same && testForSameSign) || (!same && !testForSameSign)) {
                dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE);
            }
        }
    }
}

static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp)
{
    dfp_check_for_VXISI(dfp, 0);
}

static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp)
{
    dfp_check_for_VXISI(dfp, 1);
}

static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if ((decNumberIsInfinite(&dfp->a) && decNumberIsZero(&dfp->b)) ||
            (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE);
        }
    }
}

static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Division_undefined) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE);
    }
}

static void dfp_check_for_VXIDI(struct PPC_DFP *dfp)
{
    if (dfp->context.status & DEC_Invalid_operation) {
        if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE);
        }
    }
}

static void dfp_check_for_VXVC(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE);
    }
}

static void dfp_check_for_VXCVI(struct PPC_DFP *dfp)
{
    if ((dfp->context.status & DEC_Invalid_operation) &&
        (!decNumberIsSNaN(&dfp->a)) &&
        (!decNumberIsSNaN(&dfp->b))) {
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
    }
}

static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp)
{
    if (decNumberIsNaN(&dfp->t)) {
        dfp->crbf = 1;
    } else if (decNumberIsZero(&dfp->t)) {
        dfp->crbf = 2;
    } else if (decNumberIsNegative(&dfp->t)) {
        dfp->crbf = 8;
    } else {
        dfp->crbf = 4;
    }
}

static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp)
{
    dfp->env->fpscr &= ~FP_FPCC;
    dfp->env->fpscr |= (dfp->crbf << FPSCR_FPCC);
}

static inline void dfp_makeQNaN(decNumber *dn)
{
    dn->bits &= ~DECSPECIAL;
    dn->bits |= DECNAN;
}
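
/*
 * dfp_get_digit() below relies on decNumber keeping its coefficient least
 * significant unit first, with DECDPUN (three here) decimal digits packed
 * per lsu element.  For example, a coefficient of 1234567 is held as
 * lsu[0] = 567, lsu[1] = 234, lsu[2] = 1, so dfp_get_digit(dn, 4) returns
 * (lsu[1] / 10) % 10 = 3.
 */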

static inline int dfp_get_digit(decNumber *dn, int n)
{
    assert(DECDPUN == 3);
    int unit = n / DECDPUN;
    int dig = n % DECDPUN;
    switch (dig) {
    case 0:
        return dn->lsu[unit] % 10;
    case 1:
        return (dn->lsu[unit] / 10) % 10;
    case 2:
        return dn->lsu[unit] / 100;
    }
    g_assert_not_reached();
}

#define DFP_HELPER_TAB(op, dnop, postprocs, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    postprocs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}
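
/*
 * For instance, DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64) below
 * expands (roughly) to:
 *
 *   void helper_dadd(CPUPPCState *env, uint64_t *t, uint64_t *a, uint64_t *b)
 *   {
 *       struct PPC_DFP dfp;
 *       dfp_prepare_decimal64(&dfp, a, b, env);
 *       decNumberAdd(&dfp.t, &dfp.a, &dfp.b, &dfp.context);
 *       decimal64FromNumber((decimal64 *)dfp.t64, &dfp.t, &dfp.context);
 *       ADD_PPs(&dfp);
 *       t[0] = dfp.t64[0];
 *   }
 */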

static void ADD_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_add(dfp);
}

DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64)
DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128)

static void SUB_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXISI_subtract(dfp);
}

DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64)
DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128)

static void MUL_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXIMZ(dfp);
}

DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64)
DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128)

static void DIV_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_OX(dfp);
    dfp_check_for_UX(dfp);
    dfp_check_for_ZX(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXZDZ(dfp);
    dfp_check_for_VXIDI(dfp);
}

DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64)
DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128)

#define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    postprocs(&dfp); \
    return dfp.crbf; \
}

static void CMPU_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64)
DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128)

static void CMPO_PPs(struct PPC_DFP *dfp)
{
    dfp_set_CRBF_from_T(dfp);
    dfp_set_FPCC_from_CRBF(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXVC(dfp);
}

DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64)
DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128)
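
/*
 * dtstdc/dtstdcq test the data class of the operand against the 6-bit DCM
 * mask: 0x20 zero, 0x10 subnormal, 0x08 normal, 0x04 infinity, 0x02 QNaN,
 * 0x01 SNaN.  The returned CR field encodes the sign of the operand in
 * bit 0x8 and the match result in bit 0x2.
 */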

#define DFP_HELPER_TSTDC(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint32_t dcm) \
{ \
    struct PPC_DFP dfp; \
    int match = 0; \
    dfp_prepare_decimal##size(&dfp, a, 0, env); \
    match |= (dcm & 0x20) && decNumberIsZero(&dfp.a); \
    match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context); \
    match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context); \
    match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a); \
    match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a); \
    match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a); \
    if (decNumberIsNegative(&dfp.a)) { \
        dfp.crbf = match ? 0xA : 0x8; \
    } else { \
        dfp.crbf = match ? 0x2 : 0x0; \
    } \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTDC(dtstdc, 64)
DFP_HELPER_TSTDC(dtstdcq, 128)

#define DFP_HELPER_TSTDG(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint32_t dcm) \
{ \
    struct PPC_DFP dfp; \
    int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero, \
        is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero, \
        match; \
    dfp_prepare_decimal##size(&dfp, a, 0, env); \
    if ((size) == 64) { \
        minexp = -398; \
        maxexp = 369; \
        nzero_digits = 16; \
        nzero_idx = 5; \
    } else if ((size) == 128) { \
        minexp = -6176; \
        maxexp = 6111; \
        nzero_digits = 34; \
        nzero_idx = 11; \
    } \
    is_negative = decNumberIsNegative(&dfp.a); \
    is_zero = decNumberIsZero(&dfp.a); \
    is_extreme_exp = (dfp.a.exponent == maxexp) || \
                     (dfp.a.exponent == minexp); \
    is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context); \
    is_normal = decNumberIsNormal(&dfp.a, &dfp.context); \
    leftmost_is_nonzero = (dfp.a.digits == nzero_digits) && \
                          (dfp.a.lsu[nzero_idx] != 0); \
    match = 0; \
    match |= (dcm & 0x20) && is_zero && !is_extreme_exp; \
    match |= (dcm & 0x10) && is_zero && is_extreme_exp; \
    match |= (dcm & 0x08) && \
             (is_subnormal || (is_normal && is_extreme_exp)); \
    match |= (dcm & 0x04) && is_normal && !is_extreme_exp && \
             !leftmost_is_nonzero; \
    match |= (dcm & 0x02) && is_normal && !is_extreme_exp && \
             leftmost_is_nonzero; \
    match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a); \
    if (is_negative) { \
        dfp.crbf = match ? 0xA : 0x8; \
    } else { \
        dfp.crbf = match ? 0x2 : 0x0; \
    } \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTDG(dtstdg, 64)
DFP_HELPER_TSTDG(dtstdgq, 128)

#define DFP_HELPER_TSTEX(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    int expa, expb, a_is_special, b_is_special; \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    expa = dfp.a.exponent; \
    expb = dfp.b.exponent; \
    a_is_special = decNumberIsSpecial(&dfp.a); \
    b_is_special = decNumberIsSpecial(&dfp.b); \
    if (a_is_special || b_is_special) { \
        int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1; \
        int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1; \
        dfp.crbf = (atype ^ btype) ? 0x1 : 0x2; \
    } else if (expa < expb) { \
        dfp.crbf = 0x8; \
    } else if (expa > expb) { \
        dfp.crbf = 0x4; \
    } else { \
        dfp.crbf = 0x2; \
    } \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTEX(dtstex, 64)
DFP_HELPER_TSTEX(dtstexq, 128)

#define DFP_HELPER_TSTSF(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    unsigned k; \
    uint64_t a64; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    get_dfp64(&a64, a); \
    k = a64 & 0x3F; \
    if (unlikely(decNumberIsSpecial(&dfp.b))) { \
        dfp.crbf = 1; \
    } else if (k == 0) { \
        dfp.crbf = 4; \
    } else if (unlikely(decNumberIsZero(&dfp.b))) { \
        /* Zero has no sig digits */ \
        dfp.crbf = 4; \
    } else { \
        unsigned nsd = dfp.b.digits; \
        if (k < nsd) { \
            dfp.crbf = 8; \
        } else if (k > nsd) { \
            dfp.crbf = 4; \
        } else { \
            dfp.crbf = 2; \
        } \
    } \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTSF(dtstsf, 64)
DFP_HELPER_TSTSF(dtstsfq, 128)

#define DFP_HELPER_TSTSFI(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint32_t a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    unsigned uim; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    uim = a & 0x3F; \
    if (unlikely(decNumberIsSpecial(&dfp.b))) { \
        dfp.crbf = 1; \
    } else if (uim == 0) { \
        dfp.crbf = 4; \
    } else if (unlikely(decNumberIsZero(&dfp.b))) { \
        /* Zero has no sig digits */ \
        dfp.crbf = 4; \
    } else { \
        unsigned nsd = dfp.b.digits; \
        if (uim < nsd) { \
            dfp.crbf = 8; \
        } else if (uim > nsd) { \
            dfp.crbf = 4; \
        } else { \
            dfp.crbf = 2; \
        } \
    } \
    dfp_set_FPCC_from_CRBF(&dfp); \
    return dfp.crbf; \
}

DFP_HELPER_TSTSFI(dtstsfi, 64)
DFP_HELPER_TSTSFI(dtstsfiq, 128)
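
/*
 * The quantize family (dquai, dqua, drrnd) shares dfp_quantize(): the
 * result is b quantized to the exponent of a, with SNaN operands turned
 * into QNaNs and QNaN operands propagated unchanged.
 */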

static void QUA_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
    dfp_check_for_VXCVI(dfp);
}

static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp)
{
    dfp_set_round_mode_from_immediate(0, rmc, dfp);
    decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context);
    if (decNumberIsSNaN(&dfp->a)) {
        dfp->t = dfp->a;
        dfp_makeQNaN(&dfp->t);
    } else if (decNumberIsSNaN(&dfp->b)) {
        dfp->t = dfp->b;
        dfp_makeQNaN(&dfp->t);
    } else if (decNumberIsQNaN(&dfp->a)) {
        dfp->t = dfp->a;
    } else if (decNumberIsQNaN(&dfp->b)) {
        dfp->t = dfp->b;
    }
}

#define DFP_HELPER_QUAI(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, \
                 uint32_t te, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    decNumberFromUInt32(&dfp.a, 1); \
    dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3); \
    dfp_quantize(rmc, &dfp); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    QUA_PPs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_QUAI(dquai, 64)
DFP_HELPER_QUAI(dquaiq, 128)

#define DFP_HELPER_QUA(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
                 uint64_t *b, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, a, b, env); \
    dfp_quantize(rmc, &dfp); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    QUA_PPs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_QUA(dqua, 64)
DFP_HELPER_QUA(dquaq, 128)

static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
                         struct PPC_DFP *dfp)
{
    int msd_orig, msd_rslt;

    if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) {
        dfp->t = dfp->b;
        if (decNumberIsSNaN(&dfp->b)) {
            dfp_makeQNaN(&dfp->t);
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FPSCR_VE);
        }
        return;
    }

    /* Reround is equivalent to quantizing b with 1**E(n) where */
    /* n = exp(b) + numDigits(b) - reference_significance.      */
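    /* For example, with b = 1234567E-4 (seven digits) and ref_sig = 3   */
    /* the quantum is 1E(-4 + 7 - 3) = 1E0, so b rerounds to 123E0.      */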

    decNumberFromUInt32(&dfp->a, 1);
    dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig;

    if (unlikely(dfp->a.exponent > xmax)) {
        dfp->t.digits = 0;
        dfp->t.bits &= ~DECNEG;
        dfp_makeQNaN(&dfp->t);
        dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
        return;
    }

    dfp_quantize(rmc, dfp);

    msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits-1);
    msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits-1);

    /* If the quantization resulted in rounding up to the next magnitude, */
    /* then we need to shift the significand and adjust the exponent.     */

    if (unlikely((msd_orig == 9) && (msd_rslt == 1))) {

        decNumber negone;

        decNumberFromInt32(&negone, -1);
        decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context);
        dfp->t.exponent++;

        if (unlikely(dfp->t.exponent > xmax)) {
            dfp_makeQNaN(&dfp->t);
            dfp->t.digits = 0;
            dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
            /* Inhibit XX in this case */
            decContextClearStatus(&dfp->context, DEC_Inexact);
        }
    }
}

#define DFP_HELPER_RRND(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
                 uint64_t *b, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    uint64_t a64; \
    int32_t ref_sig; \
    int32_t xmax = ((size) == 64) ? 369 : 6111; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    get_dfp64(&a64, a); \
    ref_sig = a64 & 0x3f; \
    _dfp_reround(rmc, ref_sig, xmax, &dfp); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    QUA_PPs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_RRND(drrnd, 64)
DFP_HELPER_RRND(drrndq, 128)

#define DFP_HELPER_RINT(op, postprocs, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, \
                 uint32_t r, uint32_t rmc) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    dfp_set_round_mode_from_immediate(r, rmc, &dfp); \
    decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    postprocs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

static void RINTX_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_RINT(drintx, RINTX_PPs, 64)
DFP_HELPER_RINT(drintxq, RINTX_PPs, 128)

static void RINTN_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_VXSNAN(dfp);
}

DFP_HELPER_RINT(drintn, RINTN_PPs, 64)
DFP_HELPER_RINT(drintnq, RINTN_PPs, 128)

void helper_dctdp(CPUPPCState *env, uint64_t *t, uint64_t *b)
{
    struct PPC_DFP dfp;
    uint64_t b64;
    uint32_t b_short;

    get_dfp64(&b64, b);
    b_short = (uint32_t)b64;

    dfp_prepare_decimal64(&dfp, 0, 0, env);
    decimal32ToNumber((decimal32 *)&b_short, &dfp.t);
    decimal64FromNumber((decimal64 *)t, &dfp.t, &dfp.context);
    dfp_set_FPRF_from_FRT(&dfp);
}

void helper_dctqpq(CPUPPCState *env, uint64_t *t, uint64_t *b)
{
    struct PPC_DFP dfp;
    uint64_t b64;
    dfp_prepare_decimal128(&dfp, 0, 0, env);
    get_dfp64(&b64, b);
    decimal64ToNumber((decimal64 *)&b64, &dfp.t);

    dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
    dfp_set_FPRF_from_FRT(&dfp);

    decimal128FromNumber((decimal128 *)&dfp.t64, &dfp.t, &dfp.context);
    t[0] = dfp.t64[HI_IDX];
    t[1] = dfp.t64[LO_IDX];
}

void helper_drsp(CPUPPCState *env, uint64_t *t, uint64_t *b)
{
    struct PPC_DFP dfp;
    uint32_t t_short = 0;
    dfp_prepare_decimal64(&dfp, 0, b, env);
    decimal32FromNumber((decimal32 *)&t_short, &dfp.b, &dfp.context);
    decimal32ToNumber((decimal32 *)&t_short, &dfp.t);

    dfp_set_FPRF_from_FRT_short(&dfp);
    dfp_check_for_OX(&dfp);
    dfp_check_for_UX(&dfp);
    dfp_check_for_XX(&dfp);

    *t = t_short;
}

void helper_drdpq(CPUPPCState *env, uint64_t *t, uint64_t *b)
{
    struct PPC_DFP dfp;
    dfp_prepare_decimal128(&dfp, 0, b, env);
    decimal64FromNumber((decimal64 *)&dfp.t64, &dfp.b, &dfp.context);
    decimal64ToNumber((decimal64 *)&dfp.t64, &dfp.t);

    dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
    dfp_set_FPRF_from_FRT_long(&dfp);
    dfp_check_for_OX(&dfp);
    dfp_check_for_UX(&dfp);
    dfp_check_for_XX(&dfp);

    decimal64FromNumber((decimal64 *)dfp.t64, &dfp.t, &dfp.context);
    t[0] = dfp.t64[0];
    t[1] = 0;
}

#define DFP_HELPER_CFFIX(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    uint64_t b64; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    get_dfp64(&b64, b); \
    decNumberFromInt64(&dfp.t, (int64_t)b64); \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
    CFFIX_PPs(&dfp); \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

static void CFFIX_PPs(struct PPC_DFP *dfp)
{
    dfp_set_FPRF_from_FRT(dfp);
    dfp_check_for_XX(dfp);
}

DFP_HELPER_CFFIX(dcffix, 64)
DFP_HELPER_CFFIX(dcffixq, 128)

#define DFP_HELPER_CTFIX(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    if (unlikely(decNumberIsSpecial(&dfp.b))) { \
        uint64_t invalid_flags = FP_VX | FP_VXCVI; \
        if (decNumberIsInfinite(&dfp.b)) { \
            dfp.t64[0] = decNumberIsNegative(&dfp.b) ? INT64_MIN : INT64_MAX; \
        } else { /* NaN */ \
            dfp.t64[0] = INT64_MIN; \
            if (decNumberIsSNaN(&dfp.b)) { \
                invalid_flags |= FP_VXSNAN; \
            } \
        } \
        dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); \
    } else if (unlikely(decNumberIsZero(&dfp.b))) { \
        dfp.t64[0] = 0; \
    } else { \
        decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); \
        dfp.t64[0] = decNumberIntegralToInt64(&dfp.b, &dfp.context); \
        if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { \
            dfp.t64[0] = decNumberIsNegative(&dfp.b) ? INT64_MIN : INT64_MAX; \
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); \
        } else { \
            dfp_check_for_XX(&dfp); \
        } \
    } \
    *t = dfp.t64[0]; \
}

DFP_HELPER_CTFIX(dctfix, 64)
DFP_HELPER_CTFIX(dctfixq, 128)

static inline void dfp_set_bcd_digit_64(uint64_t *t, uint8_t digit,
                                        unsigned n)
{
    *t |= ((uint64_t)(digit & 0xF) << (n << 2));
}

static inline void dfp_set_bcd_digit_128(uint64_t *t, uint8_t digit,
                                         unsigned n)
{
    t[(n & 0x10) ? HI_IDX : LO_IDX] |=
        ((uint64_t)(digit & 0xF) << ((n & 15) << 2));
}

static inline void dfp_set_sign_64(uint64_t *t, uint8_t sgn)
{
    *t <<= 4;
    *t |= (sgn & 0xF);
}

static inline void dfp_set_sign_128(uint64_t *t, uint8_t sgn)
{
    t[HI_IDX] <<= 4;
    t[HI_IDX] |= (t[LO_IDX] >> 60);
    t[LO_IDX] <<= 4;
    t[LO_IDX] |= (sgn & 0xF);
}
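
/*
 * ddedpd/ddedpdq below unpack the coefficient into BCD, one digit per
 * nibble, least significant digit in nibble 0; for the 128-bit form,
 * digits 0-15 land in the low doubleword and digits 16-31 in the high
 * one.  When a sign is requested (sp & 2), the result is shifted left by
 * one nibble and the sign code (0xD negative, 0xF or 0xC positive) is
 * inserted at the bottom.
 */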

#define DFP_HELPER_DEDPD(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, uint32_t sp) \
{ \
    struct PPC_DFP dfp; \
    uint8_t digits[34]; \
    int i, N; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    decNumberGetBCD(&dfp.b, digits); \
    dfp.t64[0] = dfp.t64[1] = 0; \
    N = dfp.b.digits; \
    for (i = 0; (i < N) && (i < (size)/4); i++) { \
        dfp_set_bcd_digit_##size(dfp.t64, digits[N-i-1], i); \
    } \
    if (sp & 2) { \
        uint8_t sgn; \
        if (decNumberIsNegative(&dfp.b)) { \
            sgn = 0xD; \
        } else { \
            sgn = ((sp & 1) ? 0xF : 0xC); \
        } \
        dfp_set_sign_##size(dfp.t64, sgn); \
    } \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_DEDPD(ddedpd, 64)
DFP_HELPER_DEDPD(ddedpdq, 128)

static inline uint8_t dfp_get_bcd_digit_64(uint64_t *t, unsigned n)
{
    return *t >> ((n << 2) & 63) & 15;
}

static inline uint8_t dfp_get_bcd_digit_128(uint64_t *t, unsigned n)
{
    return t[(n & 0x10) ? HI_IDX : LO_IDX] >> ((n << 2) & 63) & 15;
}

#define DFP_HELPER_ENBCD(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, uint32_t s) \
{ \
    struct PPC_DFP dfp; \
    uint8_t digits[32]; \
    int n = 0, offset = 0, sgn = 0, nonzero = 0; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    decNumberZero(&dfp.t); \
    if (s) { \
        uint8_t sgnNibble = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
        switch (sgnNibble) { \
        case 0xD: \
        case 0xB: \
            sgn = 1; \
            break; \
        case 0xC: \
        case 0xF: \
        case 0xA: \
        case 0xE: \
            sgn = 0; \
            break; \
        default: \
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
            return; \
        } \
    } \
    while (offset < (size) / 4) { \
        n++; \
        digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
        if (digits[(size) / 4 - n] > 10) { \
            dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
            return; \
        } else { \
            nonzero |= (digits[(size) / 4 - n] > 0); \
        } \
    } \
    if (nonzero) { \
        decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \
    } \
    if (s && sgn) { \
        dfp.t.bits |= DECNEG; \
    } \
    decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                              &dfp.context); \
    dfp_set_FPRF_from_FRT(&dfp); \
    if ((size) == 64) { \
        t[0] = dfp.t64[0]; \
    } else if ((size) == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_ENBCD(denbcd, 64)
DFP_HELPER_ENBCD(denbcdq, 128)

#define DFP_HELPER_XEX(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    if (unlikely(decNumberIsSpecial(&dfp.b))) { \
        if (decNumberIsInfinite(&dfp.b)) { \
            *t = -1; \
        } else if (decNumberIsSNaN(&dfp.b)) { \
            *t = -3; \
        } else if (decNumberIsQNaN(&dfp.b)) { \
            *t = -2; \
        } else { \
            assert(0); \
        } \
    } else { \
        if ((size) == 64) { \
            *t = dfp.b.exponent + 398; \
        } else if ((size) == 128) { \
            *t = dfp.b.exponent + 6176; \
        } else { \
            assert(0); \
        } \
    } \
}

DFP_HELPER_XEX(dxex, 64)
DFP_HELPER_XEX(dxexq, 128)

static void dfp_set_raw_exp_64(uint64_t *t, uint64_t raw)
{
    *t &= 0x8003ffffffffffffULL;
    *t |= (raw << (63 - 13));
}

static void dfp_set_raw_exp_128(uint64_t *t, uint64_t raw)
{
    t[HI_IDX] &= 0x80003fffffffffffULL;
    t[HI_IDX] |= (raw << (63 - 17));
}

#define DFP_HELPER_IEX(op, size) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, uint64_t *b) \
{ \
    struct PPC_DFP dfp; \
    uint64_t raw_qnan, raw_snan, raw_inf, max_exp, a64; \
    int bias; \
    int64_t exp; \
    get_dfp64(&a64, a); \
    exp = (int64_t)a64; \
    dfp_prepare_decimal##size(&dfp, 0, b, env); \
    if ((size) == 64) { \
        max_exp = 767; \
        raw_qnan = 0x1F00; \
        raw_snan = 0x1F80; \
        raw_inf = 0x1E00; \
        bias = 398; \
    } else if ((size) == 128) { \
        max_exp = 12287; \
        raw_qnan = 0x1f000; \
        raw_snan = 0x1f800; \
        raw_inf = 0x1e000; \
        bias = 6176; \
    } else { \
        assert(0); \
    } \
    if (unlikely((exp < 0) || (exp > max_exp))) { \
        dfp.t64[0] = dfp.b64[0]; \
        dfp.t64[1] = dfp.b64[1]; \
        if (exp == -1) { \
            dfp_set_raw_exp_##size(dfp.t64, raw_inf); \
        } else if (exp == -3) { \
            dfp_set_raw_exp_##size(dfp.t64, raw_snan); \
        } else { \
            dfp_set_raw_exp_##size(dfp.t64, raw_qnan); \
        } \
    } else { \
        dfp.t = dfp.b; \
        if (unlikely(decNumberIsSpecial(&dfp.t))) { \
            dfp.t.bits &= ~DECSPECIAL; \
        } \
        dfp.t.exponent = exp - bias; \
        decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                                  &dfp.context); \
    } \
    if (size == 64) { \
        t[0] = dfp.t64[0]; \
    } else if (size == 128) { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_IEX(diex, 64)
DFP_HELPER_IEX(diexq, 128)

static void dfp_clear_lmd_from_g5msb(uint64_t *t)
{

    /* The most significant 5 bits of the PowerPC DFP format combine bits */
    /* from the left-most decimal digit (LMD) and the biased exponent.    */
    /* This routine clears the LMD bits while preserving the exponent     */
    /* bits. See "Figure 80: Encoding of bits 0:4 of the G field for      */
    /* Finite Numbers" in the Power ISA for additional details.           */
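    /* For example, a finite number with LMD 9 and exponent MSBs 0b01 has */
    /* G0:4 = 0b11011; clearing the LMD gives 0b01000, i.e. LMD 0 with    */
    /* the exponent MSBs preserved.                                       */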

    uint64_t g5msb = (*t >> 58) & 0x1F;

    if ((g5msb >> 3) < 3) { /* LMD in [0-7] ? */
        *t &= ~(7ULL << 58);
    } else {
        switch (g5msb & 7) {
        case 0:
        case 1:
            g5msb = 0;
            break;
        case 2:
        case 3:
            g5msb = 0x8;
            break;
        case 4:
        case 5:
            g5msb = 0x10;
            break;
        case 6:
            g5msb = 0x1E;
            break;
        case 7:
            g5msb = 0x1F;
            break;
        }

        *t &= ~(0x1fULL << 58);
        *t |= (g5msb << 58);
    }
}

#define DFP_HELPER_SHIFT(op, size, shift_left) \
void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
                 uint32_t sh) \
{ \
    struct PPC_DFP dfp; \
    unsigned max_digits = ((size) == 64) ? 16 : 34; \
    dfp_prepare_decimal##size(&dfp, a, 0, env); \
    if (sh <= max_digits) { \
        decNumber shd; \
        unsigned special = dfp.a.bits & DECSPECIAL; \
        if (shift_left) { \
            decNumberFromUInt32(&shd, sh); \
        } else { \
            decNumberFromInt32(&shd, -((int32_t)sh)); \
        } \
        dfp.a.bits &= ~DECSPECIAL; \
        decNumberShift(&dfp.t, &dfp.a, &shd, &dfp.context); \
        dfp.t.bits |= special; \
        if (special && (dfp.t.digits >= max_digits)) { \
            dfp.t.digits = max_digits - 1; \
        } \
        decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
                                  &dfp.context); \
    } else { \
        if ((size) == 64) { \
            dfp.t64[0] = dfp.a64[0] & 0xFFFC000000000000ULL; \
            dfp_clear_lmd_from_g5msb(dfp.t64); \
        } else { \
            dfp.t64[HI_IDX] = dfp.a64[HI_IDX] & \
                              0xFFFFC00000000000ULL; \
            dfp_clear_lmd_from_g5msb(dfp.t64 + HI_IDX); \
            dfp.t64[LO_IDX] = 0; \
        } \
    } \
    if ((size) == 64) { \
        t[0] = dfp.t64[0]; \
    } else { \
        t[0] = dfp.t64[HI_IDX]; \
        t[1] = dfp.t64[LO_IDX]; \
    } \
}

DFP_HELPER_SHIFT(dscli, 64, 1)
DFP_HELPER_SHIFT(dscliq, 128, 1)
DFP_HELPER_SHIFT(dscri, 64, 0)
DFP_HELPER_SHIFT(dscriq, 128, 0)