Tremor/misc.h
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/
#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os_types.h"

#include "asm_arm.h"
#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */

#include <sys/types.h>
/* endianness-dependent view of a 64-bit product as two 32-bit halves */
#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#endif

#if BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
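/*
 * Worked example (annotation, not part of the original header): MULT32
 * returns the high half of the 64-bit product, i.e. (x*y)>>32, and
 * MULT31 shifts that up by one so Q31 fixed-point scaling is preserved.
 * With 0x40000000 representing 0.5 in Q31:
 *
 *   ogg_int32_t half = 0x40000000;         // 0.5 in Q31
 *   ogg_int32_t q = MULT31(half, half);    // (2^60>>32)<<1 = 0x20000000,
 *                                          // i.e. 0.25 in Q31
 *
 * MULT31_SHIFT15 instead keeps bits 15..46 of the product: the low 32
 * bits of (x*y)>>15.
 */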
#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, therefore ordering
 * is important.  Shift values were chosen for the best sound quality after
 * many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits.  We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
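/*
 * Why these shift pairs work (annotation, not part of the original): the
 * accurate MULT32 computes (x*y)>>32, so here the 32 discarded bits are
 * split between the operands: x gives up 9 bits and y arrives preshifted
 * by 23 (9+23 = 32).  Likewise 8+23 = 31 for MULT31 and 6+9 = 15 for
 * MULT31_SHIFT15, matching the effective shifts of the accurate versions
 * at the cost of 9, 8 and 6 bits of precision in x respectively.
 */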
#endif

/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory.  Might or might not be
 * beneficial depending on the architecture and compiler.
 */
#define MB()
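/*
 * A minimal sketch of a non-empty definition (an assumption, not part of
 * this header): with GCC-compatible compilers a pure compiler barrier
 * would do, forcing registers back to memory without emitting any
 * hardware fence instruction:
 *
 *   #define MB() __asm__ __volatile__ ("" ::: "memory")
 *
 * The empty definition above deliberately makes MB() a no-op.
 */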
/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However, this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86.  On Intel, it is
 * better to let the compiler reload registers directly from the original
 * memory by using macros.
 */
#ifdef __i386__

#define XPROD32(_a, _b, _t, _v, _x, _y)      \
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v);       \
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y)      \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);       \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y)     \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);       \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
#else

static inline void XPROD32(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT32(a, t) + MULT32(b, v);
  *y = MULT32(b, t) - MULT32(a, v);
}

static inline void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
                            ogg_int32_t  t, ogg_int32_t  v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif
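/*
 * Annotation (not in the original): XPROD32/XPROD31 compute the pair
 *
 *   *x = a*t + b*v
 *   *y = b*t - a*v
 *
 * which, when (t,v) is a unit vector (cos w, sin w), is a 2-D rotation of
 * (a,b) -- the shape of the MDCT butterfly cross products.  XNPROD31 is
 * the same with the signs of the v terms flipped.  Typical use:
 *
 *   ogg_int32_t x, y;
 *   XPROD31(a, b, t, v, &x, &y);   // all operands Q31 fixed point
 */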
#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH
static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
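/*
 * How the branchless clamp works (annotation, not in the original): in C,
 * (x<=32767) evaluates to 1 or 0, so ((x<=32767)-1) is 0 when x is in
 * range and all ones when x overflows; the AND therefore subtracts
 * (x-32767) only in the overflow case.  Example: x = 40000 yields
 * ret = 40000 - (40000-32767) = 32767.
 */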
#endif
static inline ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
                                      ogg_int32_t b,ogg_int32_t bp,
                                      ogg_int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
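/*
 * Annotation (not in the original): the VFLOAT_* helpers implement a
 * small software pseudo-float where a value is a mantissa/exponent pair,
 * i.e. value = a * 2^ap.  Because MULT32 returns (a*b)>>32, multiplying
 * the mantissas costs 32 bits of scale, paid back in the exponent:
 *
 *   (a * 2^ap) * (b * 2^bp) = ((a*b)>>32) * 2^(ap+bp+32)
 */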
static inline ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
                                       ogg_int32_t i,
                                       ogg_int32_t *p){

  /* ip is <= 0 for any 32-bit i, so i<<-ip left-justifies i to 31 bits */
  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
static inline ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
                                     ogg_int32_t b,ogg_int32_t bp,
                                     ogg_int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
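/*
 * Annotation (not in the original): addition first aligns the two
 * mantissas to a common exponent, shifting the smaller-exponent operand
 * down with rounding (the "leak" noted above is this lost precision);
 * both operands also give up one bit of headroom so the sum cannot
 * overflow.  The final test renormalizes: if the top two bits of the sum
 * are equal (mask 0xc0000000 yields all ones or zero), a bit of magnitude
 * was lost, so the mantissa is shifted back up and the exponent
 * decremented.
 */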
#endif