/* apps/codecs/lib/codeclib_misc.h (kugel-rb.git) */
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/
//#include "config-tremor.h"

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
//#include "ivorbiscodec.h"
//#include "os_types.h"

//#include "asm_arm.h"
//#include "asm_mcf5249.h"

/* Some prototypes that were not defined elsewhere */
//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
//void _vorbis_block_ripcord(vorbis_block *vb);
//extern int _ilog(unsigned int v);
#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef ROCKBOX
#include <inttypes.h>
#endif /* ROCKBOX */

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */
/* #include <sys/types.h> */
#if ROCKBOX_LITTLE_ENDIAN == 1
union magic {
  struct {
    int32_t lo;
    int32_t hi;
  } halves;
  int64_t whole;
};
#elif ROCKBOX_BIG_ENDIAN == 1
union magic {
  struct {
    int32_t hi;
    int32_t lo;
  } halves;
  int64_t whole;
};
#endif
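
/* Explanatory note (not in the original): the union type-puns a 64-bit
   product so MULT32 can read its high word directly without a 64-bit shift;
   the struct member order follows host endianness so that halves.hi always
   lands on the top 32 bits. */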
static inline int32_t MULT32(int32_t x, int32_t y) {
  union magic magic;
  magic.whole = (int64_t)x * y;
  return magic.halves.hi;
}

static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x,y)<<1;
}

static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  union magic magic;
  magic.whole = (int64_t)x * y;
  return ((uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, therefore ordering
 * is important.  Shift values were chosen for the best sound quality after
 * many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits.  We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

static inline int32_t MULT32(int32_t x, int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline int32_t MULT31(int32_t x, int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}

#endif
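
/* Usage sketch (illustrative, not part of the original header): in the
   accurate build, MULT32(x,y) yields the high 32 bits of the 64-bit product,
   i.e. ((int64_t)x*y)>>32, and MULT31 behaves as a Q31 fixed-point multiply,
   (x*y)>>31.  The helper below is hypothetical and only demonstrates the
   convention; it assumes the 64-bit path, since in the _LOW_ACCURACY_ build
   the second argument must be preshifted. */
static inline int32_t example_q31_square_half(void)
{
  const int32_t half = (int32_t)1 << 30;  /* 0.5 in Q31 */
  return MULT31(half, half);              /* 1 << 29, i.e. 0.25 in Q31 */
}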
/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86.  On Intel, it is better
 * to let the compiler reload registers directly from original memory by
 * using macros.
 */

/* replaced XPROD32 with a macro to avoid memory reference
   _x, _y are the results (must be l-values) */
#define XPROD32(_a, _b, _t, _v, _x, _y) \
  { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
    (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#ifdef __i386__

#define XPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
#else

static inline void XPROD31(int32_t  a, int32_t  b,
                           int32_t  t, int32_t  v,
                           int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(int32_t  a, int32_t  b,
                            int32_t  t, int32_t  v,
                            int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}
#endif
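
/* Illustrative sketch (not in the original): XPROD31 computes the Q31 complex
   product (a + jb)*(t - jv), i.e. x = at + bv, y = bt - av, a rotation by a
   conjugated twiddle factor; XNPROD31 is the unconjugated counterpart.  This
   is the butterfly at the heart of mdct.c.  The wrapper and its names are
   hypothetical. */
static inline void example_rotate_q31(int32_t re, int32_t im,
                                      int32_t cos_w, int32_t sin_w,
                                      int32_t *out_re, int32_t *out_im)
{
  XPROD31(re, im, cos_w, sin_w, out_re, out_im);
}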
#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
  _x = MULT31(_a, _t) + MULT31(_b, _v);\
  _y = MULT31(_b, _t) - MULT31(_a, _v);\
}

#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
  _x = MULT31(_a, _t) - MULT31(_b, _v);\
  _y = MULT31(_b, _t) + MULT31(_a, _v);\
}
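
/* Explanatory note (not in the original): the _R variants assign to plain
   l-values rather than writing through pointers, so results can stay in
   registers when the caller keeps them in locals. */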
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

static inline
void vect_add(int32_t *x, int32_t *y, int n)
{
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}

static inline
void vect_copy(int32_t *x, int32_t *y, int n)
{
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}

static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}

static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
#endif
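
/* Usage sketch (illustrative, not part of the original): the vect_* helpers
   cover MDCT window/overlap loops.  A typical pattern applies a Q31 window
   forward over the first half of a block and mirrored (backward) over the
   second half; the wrapper and names below are hypothetical. */
static inline void example_apply_window(int32_t *block, int32_t *win, int n)
{
  vect_mult_fw(block, win, n/2);                  /* win[0..n/2-1], rising  */
  vect_mult_bw(block + n/2, win + n/2 - 1, n/2);  /* win[n/2-1..0], falling */
}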
#endif
/* not used anymore */
/*
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline int32_t CLIP_TO_15(int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}

#endif
*/
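
/* Worked example (explanatory note, not in the original): the retired
   CLIP_TO_15 clamps branchlessly.  For x = 40000, (x<=32767) is 0, so
   ((x<=32767)-1) is ~0 (all bits set) and ret -= (x-32767) leaves exactly
   32767; for in-range x both masks are zero and x passes through unchanged. */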
static inline int32_t VFLOAT_MULT(int32_t a, int32_t ap,
                                  int32_t b, int32_t bp,
                                  int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
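
/* Illustrative sketch (not in the original): the VFLOAT helpers implement a
   small software float where a value is mantissa * 2^exponent.  With
   a = 3<<20, ap = -16 (value 48) and b = 5<<20, bp = -18 (value 20), the
   accurate path returns MULT32(a,b) = 15<<8 = 3840 with *p = -2, and
   3840 * 2^-2 = 960 = 48 * 20; the +32 in the exponent accounts for MULT32
   dropping the low 32 bits.  The helper name is hypothetical. */
static inline int32_t example_vfloat_product(int32_t *exp_out)
{
  return VFLOAT_MULT(3 << 20, -16, 5 << 20, -18, exp_out);
}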
/*static inline int32_t VFLOAT_MULTI(int32_t a,int32_t ap,
                                     int32_t i,
                                     int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
*/
static inline int32_t VFLOAT_ADD(int32_t a, int32_t ap,
                                 int32_t b, int32_t bp,
                                 int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
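
/* Explanatory note (not in the original): the final test checks the top two
   mantissa bits; if they are equal (00 or 11) there is a bit of headroom, so
   the sum is renormalized by shifting left once and decrementing the
   exponent, recovering precision lost in the pre-add alignment. */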
#endif /* _V_RANDOM_H_ */