[gnumeric.git] / src / sf-trig.c
#include <gnumeric-config.h>
#include <sf-trig.h>
#include <mathfunc.h>

/* ------------------------------------------------------------------------- */

static gnm_float
gnm_cot_helper (volatile gnm_float *x)
{
	gnm_float s = gnm_sin (*x);
	gnm_float c = gnm_cos (*x);

	if (s == 0)
		return gnm_nan;
	else
		return c / s;
}

/**
 * gnm_cot:
 * @x: an angle in radians
 *
 * Returns: The co-tangent of the given angle.
 */
gnm_float
gnm_cot (gnm_float x)
{
	/* See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59089 */
	return gnm_cot_helper (&x);
}

/**
 * gnm_acot:
 * @x: a number
 *
 * Returns: The inverse co-tangent of the given number.
 */
gnm_float
gnm_acot (gnm_float x)
{
	if (gnm_finite (x)) {
		if (x == 0)
			return M_PIgnum / 2;
		return gnm_atan (1 / x);
	} else {
		/* +Inf -> +0 */
		/* -Inf -> -0 */
		/* +-NaN -> +-NaN */
		return 1 / x;
	}
}

/**
 * gnm_coth:
 * @x: a number.
 *
 * Returns: The hyperbolic co-tangent of the given number.
 */
gnm_float
gnm_coth (gnm_float x)
{
	return 1 / gnm_tanh (x);
}

/**
 * gnm_acoth:
 * @x: a number
 *
 * Returns: The inverse hyperbolic co-tangent of the given number.
 */
gnm_float
gnm_acoth (gnm_float x)
{
	return (gnm_abs (x) > 2)
		? gnm_log1p (2 / (x - 1)) / 2
		: gnm_log ((x - 1) / (x + 1)) / -2;
}
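
/*
 * Both branches above compute acoth(x) = log((x+1)/(x-1)) / 2.  For
 * |x| > 2 the log argument is 1 + 2/(x-1), i.e. close to 1, so
 * gnm_log1p is used to avoid the accuracy loss of taking gnm_log of an
 * already-rounded quotient.
 */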

/* ------------------------------------------------------------------------- */
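
/*
 * reduce_pi_simple: reduce x modulo Pi/2^(kbits-1), for arguments small
 * enough that the multiple k fits comfortably in an int (see the
 * g_assert below).  Pi/2 is kept as a sum of small non-overlapping
 * pieces (pi_over_two[]); k times each piece is subtracted in turn and
 * the rounding error of each subtraction is accumulated in xx, so the
 * final x + xx is far more accurate than a single subtraction would be.
 */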
static gnm_float
reduce_pi_simple (gnm_float x, int *pk, int kbits)
{
	static const gnm_float two_over_pi = GNM_const(0.63661977236758134307553505349);
	static const gnm_float pi_over_two[] = {
		+0x1.921fb5p+0,
		+0x1.110b46p-26,
		+0x1.1a6263p-54,
		+0x1.8a2e03p-81,
		+0x1.c1cd12p-107
	};
	int i;
	gnm_float k = gnm_floor (x * gnm_ldexp (two_over_pi, kbits - 2) + 0.5);
	gnm_float xx = 0;

	g_assert (k < (1 << 26));
	*pk = (int)k;

	if (k == 0)
		return x;

	x -= k * gnm_ldexp (pi_over_two[0], 2 - kbits);
	for (i = 1; i < 5; i++) {
		gnm_float dx = k * gnm_ldexp (pi_over_two[i], 2 - kbits);
		gnm_float s = x - dx;
		xx += x - s - dx;
		x = s;
	}

	return x + xx;
}

/*
 * Add the 64-bit number p at *dst and backwards.  This would be very
 * simple and fast in assembler.  In C it's a bit of a mess.
 */
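/*
 * dst points at the least significant of the words affected; more
 * significant words sit at lower indices, so carries propagate towards
 * dst[-1], dst[-2], ...  The h flag catches the case where p has its
 * top bit set and the initial 64-bit addition wraps, in which case the
 * lost carry is propagated by hand.
 */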
static inline void
add_at (guint32 *dst, guint64 p)
{
	unsigned h = p >> 63;

	p += *dst;
	*dst-- = p & 0xffffffffu;
	p >>= 32;
	if (p) {
		p += *dst;
		*dst-- = p & 0xffffffffu;
		if (p >> 32)
			while (++*dst == 0)
				dst--;
	} else if (h) {
		/* p overflowed, pass carry on. */
		dst--;
		while (++*dst == 0)
			dst--;
	}
}
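
/*
 * reduce_pi_full: reduce x modulo Pi/2^(kbits-1) for arbitrarily large
 * arguments, in the style of Payne-Hanek reduction: the mantissa of x
 * is multiplied, in 32-bit fixed-point pieces, by the relevant words of
 * 1/(2*Pi) (one_over_two_pi[]); the integer part of the product is
 * discarded, and the remaining fraction is multiplied by Pi/4
 * (pi_over_four[]) and rescaled to yield the reduced argument.
 */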
static gnm_float
reduce_pi_full (gnm_float x, int *pk, int kbits)
{
	/* FIXME? This table isn't big enough for long double's range */
	static const guint32 one_over_two_pi[] = {
		0x28be60dbu,
		0x9391054au,
		0x7f09d5f4u,
		0x7d4d3770u,
		0x36d8a566u,
		0x4f10e410u,
		0x7f9458eau,
		0xf7aef158u,
		0x6dc91b8eu,
		0x909374b8u,
		0x01924bbau,
		0x82746487u,
		0x3f877ac7u,
		0x2c4a69cfu,
		0xba208d7du,
		0x4baed121u,
		0x3a671c09u,
		0xad17df90u,
		0x4e64758eu,
		0x60d4ce7du,
		0x272117e2u,
		0xef7e4a0eu,
		0xc7fe25ffu,
		0xf7816603u,
		0xfbcbc462u,
		0xd6829b47u,
		0xdb4d9fb3u,
		0xc9f2c26du,
		0xd3d18fd9u,
		0xa797fa8bu,
		0x5d49eeb1u,
		0xfaf97c5eu,
		0xcf41ce7du,
		0xe294a4bau,
		0x9afed7ecu,
		0x47e35742u
	};
	static const guint32 pi_over_four[] = {
		0xc90fdaa2u,
		0x2168c234u,
		0xc4c6628bu,
		0x80dc1cd1u
	};

	gnm_float m;
	guint32 w2, w1, w0, wm1, wm2;
	int e, neg;
	unsigned di, i, j;
	guint32 r[6], r4[4];
	gnm_float rh, rl, l48, h48;
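
	/*
	 * Split x, scaled by 2^(-32*di-64), into five 32-bit words
	 * w2 w1 w0 wm1 wm2 (most significant first).  di is nonzero only
	 * for huge arguments; it shifts which words of the 1/(2*Pi) table
	 * the w-words get paired with in the loop below.
	 */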
	m = gnm_frexp (x, &e);
	if (e >= GNM_MANT_DIG) {
		di = ((unsigned)e - GNM_MANT_DIG) / 32u;
		e -= di * 32;
	} else
		di = 0;
	m = gnm_ldexp (m, e - 64);
	w2 = (guint32)gnm_floor (m); m = gnm_ldexp (m - w2, 32);
	w1 = (guint32)gnm_floor (m); m = gnm_ldexp (m - w1, 32);
	w0 = (guint32)gnm_floor (m); m = gnm_ldexp (m - w0, 32);
	wm1 = (guint32)gnm_floor (m); m = gnm_ldexp (m - wm1, 32);
	wm2 = (guint32)gnm_floor (m);

	/*
	 * r[0] is an integer overflow area to be ignored.  We will not create
	 * any carry into r[-1] because 5/(2pi) < 1.
	 */
	r[0] = 0;

	for (i = 0; i < 5; i++) {
		g_assert (i + 2 + di < G_N_ELEMENTS (one_over_two_pi));
		r[i + 1] = 0;
		if (wm2 && i > 1)
			add_at (&r[i + 1], (guint64)wm2 * one_over_two_pi[i - 2]);
		if (wm1 && i > 0)
			add_at (&r[i + 1], (guint64)wm1 * one_over_two_pi[i - 1]);
		if (w0)
			add_at (&r[i + 1], (guint64)w0 * one_over_two_pi[i + di]);
		if (w1)
			add_at (&r[i + 1], (guint64)w1 * one_over_two_pi[i + 1 + di]);
		if (w2)
			add_at (&r[i + 1], (guint64)w2 * one_over_two_pi[i + 2 + di]);

		/*
		 * We're done at i==3 unless the first 31-kbits bits, not counting
		 * those ending up in sign and *pk, are all zeros or ones.
		 */
		if (i == 3 && ((r[1] + 1u) & (0x7fffffffu >> kbits)) > 1)
			break;
	}
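
	/*
	 * The top kbits bits of the fraction form the reduction count.  If
	 * the next bit is set, the remainder exceeds half a step, so round
	 * the count up and negate the rest of the fraction (two's
	 * complement) to keep the reduced argument within half a step.
	 */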
	*pk = kbits ? (r[1] >> (32 - kbits)) : 0;
	if ((neg = ((r[1] >> (31 - kbits)) & 1))) {
		(*pk)++;
		/* Two's-complement negation */
		for (j = 1; j <= i; j++) r[j] ^= 0xffffffffu;
		add_at (&r[i], 1);
	}
	r[1] &= (0xffffffffu >> kbits);

	j = 1;
	if (r[j] == 0) j++;
	r4[0] = r4[1] = r4[2] = r4[3] = 0;
	add_at (&r4[1], (guint64)r[j    ] * pi_over_four[0]);
	add_at (&r4[2], (guint64)r[j    ] * pi_over_four[1]);
	add_at (&r4[2], (guint64)r[j + 1] * pi_over_four[0]);
	add_at (&r4[3], (guint64)r[j    ] * pi_over_four[2]);
	add_at (&r4[3], (guint64)r[j + 1] * pi_over_four[1]);
	add_at (&r4[3], (guint64)r[j + 2] * pi_over_four[0]);
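
	/*
	 * Turn the fixed-point result back into floating point: take 96
	 * bits of r4[], split them into a high 48-bit part (h48) and a low
	 * 48-bit part (l48), and renormalize the pair into rh + rl where
	 * rh carries the rounded sum and rl the rounding error.
	 */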
	h48 = gnm_ldexp (((guint64)r4[0] << 16) | (r4[1] >> 16),
			 -32 * j + (kbits + 1) - 16);
	l48 = gnm_ldexp (((guint64)(r4[1] & 0xffff) << 32) | r4[2],
			 -32 * j + (kbits + 1) - 64);

	rh = h48 + l48;
	rl = h48 - rh + l48;

	if (neg) {
		rh = -rh;
		rl = -rl;
	}

	return gnm_ldexp (rh + rl, 2 - kbits);
}

/**
 * gnm_reduce_pi:
 * @x: number to reduce
 * @e: scale between -1 and 8, inclusive.
 * @k: (out): location to return lower @e+1 bits of reduction count
 *
 * This function performs range reduction for trigonometric functions.
 *
 * Returns: a value, xr, such that x = xr + j * Pi/2^@e for some integer
 * number j and |xr| <= Pi/2^(@e+1).  The lower @e+1 bits of j will be
 * returned in @k.
 */
gnm_float
gnm_reduce_pi (gnm_float x, int e, int *k)
{
	gnm_float xr;
	void *state;

	g_return_val_if_fail (e >= -1 && e <= 8, x);
	g_return_val_if_fail (k != NULL, x);

	if (!gnm_finite (x)) {
		*k = 0;
		return x * gnm_nan;
	}

	/*
	 * We aren't actually using quads, but we rely somewhat on
	 * proper ieee double semantics.
	 */
	state = gnm_quad_start ();

	if (gnm_abs (x) < (1u << (27 - e)))
		xr = reduce_pi_simple (gnm_abs (x), k, e + 1);
	else
		xr = reduce_pi_full (gnm_abs (x), k, e + 1);

	if (x < 0) {
		xr = -xr;
		*k = -*k;
	}
	*k &= ((1 << (e + 1)) - 1);

	gnm_quad_end (state);

	return xr;
}
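
/*
 * For example, gnm_reduce_pi (100.0, 1, &k) reduces modulo Pi/2: the
 * nearest multiple is j = 64, so the result is roughly
 * 100 - 64*Pi/2 = -0.531 (well within Pi/4), and k receives the low
 * two bits of j, i.e. 0.
 */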

#ifdef GNM_REDUCES_TRIG_RANGE
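
/*
 * When GNM_REDUCES_TRIG_RANGE is defined, Gnumeric provides its own
 * gnm_sin, gnm_cos and gnm_tan built on gnm_reduce_pi: the argument is
 * reduced modulo Pi/2 (e = 1), and the two quadrant bits in km4 pick
 * the sign and whether sine and cosine trade places.
 */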

gnm_float
gnm_sin (gnm_float x)
{
	int km4;
	gnm_float xr = gnm_reduce_pi (x, 1, &km4);

	switch (km4) {
	default:
	case 0: return +sin (xr);
	case 1: return +cos (xr);
	case 2: return -sin (xr);
	case 3: return -cos (xr);
	}
}

gnm_float
gnm_cos (gnm_float x)
{
	int km4;
	gnm_float xr = gnm_reduce_pi (x, 1, &km4);

	switch (km4) {
	default:
	case 0: return +cos (xr);
	case 1: return -sin (xr);
	case 2: return -cos (xr);
	case 3: return +sin (xr);
	}
}

gnm_float
gnm_tan (gnm_float x)
{
	int km4;
	gnm_float xr = gnm_reduce_pi (x, 1, &km4);

	switch (km4) {
	default:
	case 0: case 2: return +tan (xr);
	case 1: case 3: return -cos (xr) / sin (xr);
	}
}

#endif

/* ------------------------------------------------------------------------- */