/* Software floating-point emulation.
   Definitions for IEEE Extended Precision.
   Copyright (C) 1999,2006,2007,2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek (jj@ultra.linux.cz).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   In addition to the permissions in the GNU Lesser General Public
   License, the Free Software Foundation gives you unlimited
   permission to link the compiled version of this file into
   combinations with other programs, and to distribute those
   combinations without any restriction coming from the use of this
   file.  (The Lesser General Public License restrictions do apply in
   other respects; for example, they cover modification of the file,
   and distribution when not linked into a combine executable.)

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#if _FP_W_TYPE_SIZE < 32
#error "Here's a nickel, kid. Go buy yourself a real computer."
#endif

#if _FP_W_TYPE_SIZE < 64
#define _FP_FRACTBITS_E         (4*_FP_W_TYPE_SIZE)
#else
#define _FP_FRACTBITS_E         (2*_FP_W_TYPE_SIZE)
#endif

#define _FP_FRACBITS_E          64
#define _FP_FRACXBITS_E         (_FP_FRACTBITS_E - _FP_FRACBITS_E)
#define _FP_WFRACBITS_E         (_FP_WORKBITS + _FP_FRACBITS_E)
#define _FP_WFRACXBITS_E        (_FP_FRACTBITS_E - _FP_WFRACBITS_E)
#define _FP_EXPBITS_E           15
#define _FP_EXPBIAS_E           16383
#define _FP_EXPMAX_E            32767

#define _FP_QNANBIT_E \
        ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2) % _FP_W_TYPE_SIZE)
#define _FP_QNANBIT_SH_E \
        ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
#define _FP_IMPLBIT_E \
        ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1) % _FP_W_TYPE_SIZE)
#define _FP_IMPLBIT_SH_E \
        ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
#define _FP_OVERFLOW_E \
        ((_FP_W_TYPE)1 << (_FP_WFRACBITS_E % _FP_W_TYPE_SIZE))
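
/* Illustrative sketch, kept disabled: with _FP_FRACBITS_E == 64 and
   _FP_EXPBIAS_E == 16383, the value 1.0 is stored with a biased exponent of
   16383 and only the explicit integer bit (_FP_IMPLBIT_E, bit 63 of the
   fraction) set.  The check below assumes a little-endian host whose
   "long double" is the x86-style 80-bit extended format; on other hosts the
   byte offsets used here do not apply.  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  long double one = 1.0L;
  unsigned char b[sizeof one];
  uint64_t frac;
  unsigned int exp;

  memcpy (b, &one, sizeof one);
  memcpy (&frac, b, 8);         /* low 8 bytes: the 64-bit fraction */
  exp = ((unsigned int) b[8] | ((unsigned int) b[9] << 8)) & 0x7fff;

  assert (exp == 16383);                /* _FP_EXPBIAS_E */
  assert (frac == (uint64_t) 1 << 63);  /* _FP_IMPLBIT_E: integer bit only */
  return 0;
}
#endif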

typedef float XFtype __attribute__((mode(XF)));

#if _FP_W_TYPE_SIZE < 64

union _FP_UNION_E
{
  XFtype flt;
  struct _FP_STRUCT_LAYOUT
  {
#if __BYTE_ORDER == __BIG_ENDIAN
    unsigned long pad1 : _FP_W_TYPE_SIZE;
    unsigned long pad2 : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
    unsigned long sign : 1;
    unsigned long exp : _FP_EXPBITS_E;
    unsigned long frac1 : _FP_W_TYPE_SIZE;
    unsigned long frac0 : _FP_W_TYPE_SIZE;
#else
    unsigned long frac0 : _FP_W_TYPE_SIZE;
    unsigned long frac1 : _FP_W_TYPE_SIZE;
    unsigned exp : _FP_EXPBITS_E;
    unsigned sign : 1;
#endif /* not bigendian */
  } bits __attribute__((packed));
};

#define FP_DECL_E(X)    _FP_DECL(4,X)

#define FP_UNPACK_RAW_E(X, val)                 \
  do {                                          \
    union _FP_UNION_E _flo; _flo.flt = (val);   \
    X##_f[2] = 0; X##_f[3] = 0;                 \
    X##_f[0] = _flo.bits.frac0;                 \
    X##_f[1] = _flo.bits.frac1;                 \
    X##_e = _flo.bits.exp;                      \
    X##_s = _flo.bits.sign;                     \
  } while (0)

#define FP_UNPACK_RAW_EP(X, val)                \
  do {                                          \
    union _FP_UNION_E *_flo =                   \
      (union _FP_UNION_E *)(val);               \
    X##_f[2] = 0; X##_f[3] = 0;                 \
    X##_f[0] = _flo->bits.frac0;                \
    X##_f[1] = _flo->bits.frac1;                \
    X##_e = _flo->bits.exp;                     \
    X##_s = _flo->bits.sign;                    \
  } while (0)

#define FP_PACK_RAW_E(val, X)                   \
  do {                                          \
    union _FP_UNION_E _flo;                     \
    if (X##_e) X##_f[1] |= _FP_IMPLBIT_E;       \
    else X##_f[1] &= ~(_FP_IMPLBIT_E);          \
    _flo.bits.frac0 = X##_f[0];                 \
    _flo.bits.frac1 = X##_f[1];                 \
    _flo.bits.exp   = X##_e;                    \
    _flo.bits.sign  = X##_s;                    \
    (val) = _flo.flt;                           \
  } while (0)

#define FP_PACK_RAW_EP(val, X)                  \
  do {                                          \
    if (!FP_INHIBIT_RESULTS)                    \
      {                                         \
        union _FP_UNION_E *_flo =               \
          (union _FP_UNION_E *)(val);           \
        if (X##_e) X##_f[1] |= _FP_IMPLBIT_E;   \
        else X##_f[1] &= ~(_FP_IMPLBIT_E);      \
        _flo->bits.frac0 = X##_f[0];            \
        _flo->bits.frac1 = X##_f[1];            \
        _flo->bits.exp   = X##_e;               \
        _flo->bits.sign  = X##_s;               \
      }                                         \
  } while (0)

#define FP_UNPACK_E(X,val)              \
  do {                                  \
    FP_UNPACK_RAW_E(X,val);             \
    _FP_UNPACK_CANONICAL(E,4,X);        \
  } while (0)

#define FP_UNPACK_EP(X,val)             \
  do {                                  \
    FP_UNPACK_RAW_EP(X,val);            \
    _FP_UNPACK_CANONICAL(E,4,X);        \
  } while (0)

#define FP_UNPACK_SEMIRAW_E(X,val)      \
  do {                                  \
    FP_UNPACK_RAW_E(X,val);             \
    _FP_UNPACK_SEMIRAW(E,4,X);          \
  } while (0)

#define FP_UNPACK_SEMIRAW_EP(X,val)     \
  do {                                  \
    FP_UNPACK_RAW_EP(X,val);            \
    _FP_UNPACK_SEMIRAW(E,4,X);          \
  } while (0)

#define FP_PACK_E(val,X)                \
  do {                                  \
    _FP_PACK_CANONICAL(E,4,X);          \
    FP_PACK_RAW_E(val,X);               \
  } while (0)

#define FP_PACK_EP(val,X)               \
  do {                                  \
    _FP_PACK_CANONICAL(E,4,X);          \
    FP_PACK_RAW_EP(val,X);              \
  } while (0)

#define FP_PACK_SEMIRAW_E(val,X)        \
  do {                                  \
    _FP_PACK_SEMIRAW(E,4,X);            \
    FP_PACK_RAW_E(val,X);               \
  } while (0)

#define FP_PACK_SEMIRAW_EP(val,X)       \
  do {                                  \
    _FP_PACK_SEMIRAW(E,4,X);            \
    FP_PACK_RAW_EP(val,X);              \
  } while (0)

#define FP_ISSIGNAN_E(X)        _FP_ISSIGNAN(E,4,X)
#define FP_NEG_E(R,X)           _FP_NEG(E,4,R,X)
#define FP_ADD_E(R,X,Y)         _FP_ADD(E,4,R,X,Y)
#define FP_SUB_E(R,X,Y)         _FP_SUB(E,4,R,X,Y)
#define FP_MUL_E(R,X,Y)         _FP_MUL(E,4,R,X,Y)
#define FP_DIV_E(R,X,Y)         _FP_DIV(E,4,R,X,Y)
#define FP_SQRT_E(R,X)          _FP_SQRT(E,4,R,X)

/*
 * Square root algorithms:
 * We have just one right now; maybe a Newton approximation
 * should be added for those machines where division is fast.
 * This has a special _E version because the standard _4 square
 * root would not work (it has to start normally with the
 * second word and not the first), but as we have to do it
 * anyway, we optimize it by doing most of the calculations
 * in two UWtype registers instead of four.
 */
#define _FP_SQRT_MEAT_E(R, S, T, X, q)                  \
  do {                                                  \
    q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1);         \
    _FP_FRAC_SRL_4(X, (_FP_WORKBITS));                  \
    while (q)                                           \
      {                                                 \
        T##_f[1] = S##_f[1] + q;                        \
        if (T##_f[1] <= X##_f[1])                       \
          {                                             \
            S##_f[1] = T##_f[1] + q;                    \
            X##_f[1] -= T##_f[1];                       \
            R##_f[1] += q;                              \
          }                                             \
        _FP_FRAC_SLL_2(X, 1);                           \
        q >>= 1;                                        \
      }                                                 \
    q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1);         \
    while (q)                                           \
      {                                                 \
        T##_f[0] = S##_f[0] + q;                        \
        T##_f[1] = S##_f[1];                            \
        if (T##_f[1] < X##_f[1] ||                      \
            (T##_f[1] == X##_f[1] &&                    \
             T##_f[0] <= X##_f[0]))                     \
          {                                             \
            S##_f[0] = T##_f[0] + q;                    \
            S##_f[1] += (T##_f[0] > S##_f[0]);          \
            _FP_FRAC_DEC_2(X, T);                       \
            R##_f[0] += q;                              \
          }                                             \
        _FP_FRAC_SLL_2(X, 1);                           \
        q >>= 1;                                        \
      }                                                 \
    _FP_FRAC_SLL_4(R, (_FP_WORKBITS));                  \
    if (X##_f[0] | X##_f[1])                            \
      {                                                 \
        if (S##_f[1] < X##_f[1] ||                      \
            (S##_f[1] == X##_f[1] &&                    \
             S##_f[0] < X##_f[0]))                      \
          R##_f[0] |= _FP_WORK_ROUND;                   \
        R##_f[0] |= _FP_WORK_STICKY;                    \
      }                                                 \
  } while (0)
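
/* Illustrative sketch, kept disabled: _FP_SQRT_MEAT_E above is a bit-by-bit
   restoring square root, trying one result bit per iteration and subtracting
   whenever the trial value still fits under the remainder.  The function
   below shows the same idea in its textbook integer form on a plain 32-bit
   word; the macro differs only in bookkeeping, shifting the multiword
   remainder left instead of moving the trial bit down through the fraction,
   and it keeps the final nonzero remainder as round/sticky information.  */
#if 0
#include <stdint.h>

static uint32_t
isqrt32 (uint32_t x)
{
  uint32_t res = 0;
  uint32_t bit = (uint32_t) 1 << 30;    /* highest power of four in a word */

  while (bit > x)                       /* start at the top relevant bit pair */
    bit >>= 2;
  while (bit != 0)
    {
      if (x >= res + bit)
        {
          x -= res + bit;               /* accept this result bit */
          res = (res >> 1) + bit;
        }
      else
        res >>= 1;                      /* reject it, keep scanning down */
      bit >>= 2;
    }
  return res;                           /* floor(sqrt(x)); x now holds the remainder */
}
#endif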

#define FP_CMP_E(r,X,Y,un)      _FP_CMP(E,4,r,X,Y,un)
#define FP_CMP_EQ_E(r,X,Y)      _FP_CMP_EQ(E,4,r,X,Y)
#define FP_CMP_UNORD_E(r,X,Y)   _FP_CMP_UNORD(E,4,r,X,Y)

#define FP_TO_INT_E(r,X,rsz,rsg)        _FP_TO_INT(E,4,r,X,rsz,rsg)
#define FP_FROM_INT_E(X,r,rs,rt)        _FP_FROM_INT(E,4,X,r,rs,rt)

#define _FP_FRAC_HIGH_E(X)      (X##_f[2])
#define _FP_FRAC_HIGH_RAW_E(X)  (X##_f[1])

#else   /* not _FP_W_TYPE_SIZE < 64 */

union _FP_UNION_E
{
  XFtype flt;
  struct _FP_STRUCT_LAYOUT {
#if __BYTE_ORDER == __BIG_ENDIAN
    _FP_W_TYPE pad : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
    unsigned sign : 1;
    unsigned exp : _FP_EXPBITS_E;
    _FP_W_TYPE frac : _FP_W_TYPE_SIZE;
#else
    _FP_W_TYPE frac : _FP_W_TYPE_SIZE;
    unsigned exp : _FP_EXPBITS_E;
    unsigned sign : 1;
#endif
  } bits;
};

#define FP_DECL_E(X)    _FP_DECL(2,X)

#define FP_UNPACK_RAW_E(X, val)                 \
  do {                                          \
    union _FP_UNION_E _flo; _flo.flt = (val);   \
    X##_f0 = _flo.bits.frac;                    \
    X##_f1 = 0;                                 \
    X##_e = _flo.bits.exp;                      \
    X##_s = _flo.bits.sign;                     \
  } while (0)

#define FP_UNPACK_RAW_EP(X, val)                \
  do {                                          \
    union _FP_UNION_E *_flo =                   \
      (union _FP_UNION_E *)(val);               \
    X##_f0 = _flo->bits.frac;                   \
    X##_f1 = 0;                                 \
    X##_e = _flo->bits.exp;                     \
    X##_s = _flo->bits.sign;                    \
  } while (0)

#define FP_PACK_RAW_E(val, X)                   \
  do {                                          \
    union _FP_UNION_E _flo;                     \
    if (X##_e) X##_f0 |= _FP_IMPLBIT_E;         \
    else X##_f0 &= ~(_FP_IMPLBIT_E);            \
    _flo.bits.frac = X##_f0;                    \
    _flo.bits.exp  = X##_e;                     \
    _flo.bits.sign = X##_s;                     \
    (val) = _flo.flt;                           \
  } while (0)

#define FP_PACK_RAW_EP(val, X)                  \
  do {                                          \
    if (!FP_INHIBIT_RESULTS)                    \
      {                                         \
        union _FP_UNION_E *_flo =               \
          (union _FP_UNION_E *)(val);           \
        if (X##_e) X##_f0 |= _FP_IMPLBIT_E;     \
        else X##_f0 &= ~(_FP_IMPLBIT_E);        \
        _flo->bits.frac = X##_f0;               \
        _flo->bits.exp  = X##_e;                \
        _flo->bits.sign = X##_s;                \
      }                                         \
  } while (0)

#define FP_UNPACK_E(X,val)              \
  do {                                  \
    FP_UNPACK_RAW_E(X,val);             \
    _FP_UNPACK_CANONICAL(E,2,X);        \
  } while (0)

#define FP_UNPACK_EP(X,val)             \
  do {                                  \
    FP_UNPACK_RAW_EP(X,val);            \
    _FP_UNPACK_CANONICAL(E,2,X);        \
  } while (0)

#define FP_UNPACK_SEMIRAW_E(X,val)      \
  do {                                  \
    FP_UNPACK_RAW_E(X,val);             \
    _FP_UNPACK_SEMIRAW(E,2,X);          \
  } while (0)

#define FP_UNPACK_SEMIRAW_EP(X,val)     \
  do {                                  \
    FP_UNPACK_RAW_EP(X,val);            \
    _FP_UNPACK_SEMIRAW(E,2,X);          \
  } while (0)

#define FP_PACK_E(val,X)                \
  do {                                  \
    _FP_PACK_CANONICAL(E,2,X);          \
    FP_PACK_RAW_E(val,X);               \
  } while (0)

#define FP_PACK_EP(val,X)               \
  do {                                  \
    _FP_PACK_CANONICAL(E,2,X);          \
    FP_PACK_RAW_EP(val,X);              \
  } while (0)

#define FP_PACK_SEMIRAW_E(val,X)        \
  do {                                  \
    _FP_PACK_SEMIRAW(E,2,X);            \
    FP_PACK_RAW_E(val,X);               \
  } while (0)

#define FP_PACK_SEMIRAW_EP(val,X)       \
  do {                                  \
    _FP_PACK_SEMIRAW(E,2,X);            \
    FP_PACK_RAW_EP(val,X);              \
  } while (0)

#define FP_ISSIGNAN_E(X)        _FP_ISSIGNAN(E,2,X)
#define FP_NEG_E(R,X)           _FP_NEG(E,2,R,X)
#define FP_ADD_E(R,X,Y)         _FP_ADD(E,2,R,X,Y)
#define FP_SUB_E(R,X,Y)         _FP_SUB(E,2,R,X,Y)
#define FP_MUL_E(R,X,Y)         _FP_MUL(E,2,R,X,Y)
#define FP_DIV_E(R,X,Y)         _FP_DIV(E,2,R,X,Y)
#define FP_SQRT_E(R,X)          _FP_SQRT(E,2,R,X)

/*
 * Square root algorithms:
 * We have just one right now; maybe a Newton approximation
 * should be added for those machines where division is fast.
 * We optimize it by doing most of the calculations
 * in one UWtype register instead of two, although we don't
 * have to.
 */
#define _FP_SQRT_MEAT_E(R, S, T, X, q)                  \
  do {                                                  \
    q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1);         \
    _FP_FRAC_SRL_2(X, (_FP_WORKBITS));                  \
    while (q)                                           \
      {                                                 \
        T##_f0 = S##_f0 + q;                            \
        if (T##_f0 <= X##_f0)                           \
          {                                             \
            S##_f0 = T##_f0 + q;                        \
            X##_f0 -= T##_f0;                           \
            R##_f0 += q;                                \
          }                                             \
        _FP_FRAC_SLL_1(X, 1);                           \
        q >>= 1;                                        \
      }                                                 \
    _FP_FRAC_SLL_2(R, (_FP_WORKBITS));                  \
    if (X##_f0)                                         \
      {                                                 \
        if (S##_f0 < X##_f0)                            \
          R##_f0 |= _FP_WORK_ROUND;                     \
        R##_f0 |= _FP_WORK_STICKY;                      \
      }                                                 \
  } while (0)

#define FP_CMP_E(r,X,Y,un)      _FP_CMP(E,2,r,X,Y,un)
#define FP_CMP_EQ_E(r,X,Y)      _FP_CMP_EQ(E,2,r,X,Y)
#define FP_CMP_UNORD_E(r,X,Y)   _FP_CMP_UNORD(E,2,r,X,Y)

#define FP_TO_INT_E(r,X,rsz,rsg)        _FP_TO_INT(E,2,r,X,rsz,rsg)
#define FP_FROM_INT_E(X,r,rs,rt)        _FP_FROM_INT(E,2,X,r,rs,rt)

#define _FP_FRAC_HIGH_E(X)      (X##_f1)
#define _FP_FRAC_HIGH_RAW_E(X)  (X##_f0)

#endif /* not _FP_W_TYPE_SIZE < 64 */
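
/* Illustrative sketch, kept disabled: the pack/unpack and arithmetic macros
   above are meant to be combined by soft-fp operation wrappers.  The
   function below follows the pattern of the soft-fp op-*.c files
   (FP_DECL_EX, FP_INIT_ROUNDMODE and FP_HANDLE_EXCEPTIONS come from
   soft-fp.h); the function name is only an illustration, not an entry
   point provided by this file.  */
#if 0
#include "soft-fp.h"
#include "extended.h"

XFtype
example_addxf3 (XFtype a, XFtype b)
{
  FP_DECL_EX;
  FP_DECL_E (A);
  FP_DECL_E (B);
  FP_DECL_E (R);
  XFtype r;

  FP_INIT_ROUNDMODE;
  FP_UNPACK_SEMIRAW_E (A, a);   /* raw bit fields -> semi-raw working form */
  FP_UNPACK_SEMIRAW_E (B, b);
  FP_ADD_E (R, A, B);           /* multiword add on the working fractions */
  FP_PACK_SEMIRAW_E (r, R);     /* round and store back through the union */
  FP_HANDLE_EXCEPTIONS;
  return r;
}
#endif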