/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/
//#include "config-tremor.h"

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
//#include "ivorbiscodec.h"
//#include "os_types.h"

//#include "asm_arm.h"
//#include "asm_mcf5249.h"

/* Some prototypes that were not defined elsewhere */
//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
//void _vorbis_block_ripcord(vorbis_block *vb);
//extern int _ilog(unsigned int v);
#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */
/* #include <sys/types.h> */
#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    int32_t lo;
    int32_t hi;
  } halves;
  int64_t whole;
};
#elif BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    int32_t hi;
    int32_t lo;
  } halves;
  int64_t whole;
};
#endif
static inline int32_t MULT32(int32_t x, int32_t y) {
  union magic magic;
  magic.whole = (int64_t)x * y;
  return magic.halves.hi;
}

static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x,y)<<1;
}

static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  union magic magic;
  magic.whole = (int64_t)x * y;
  return ((uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
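
/* A worked example (not part of the original header): MULT32 returns the
   high half of the 64-bit product, i.e. (x*y)>>32, so multiplying two Q31
   fractions yields a Q30 result; MULT31 shifts that back up to Q31. */
#if 0 /* illustrative sketch, not compiled */
#include <assert.h>
static void mult_example(void)
{
    int32_t half = 0x40000000;                /* 0.5 in Q31 */
    assert(MULT32(half, half) == 0x10000000); /* 0.25 in Q30 */
    assert(MULT31(half, half) == 0x20000000); /* 0.25 in Q31 */
}
#endif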
#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, so the ordering
 * of the operands is important. Shift values were chosen for the best
 * sound quality after many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits. We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */
static inline int32_t MULT32(int32_t x, int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline int32_t MULT31(int32_t x, int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
#endif
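
/* Why the shifts in the 32-bit versions above line up (a note, not part of
   the original file): with y_pre = y>>23, (x>>9)*y_pre approximates
   (x*y)>>32, which is what the 64-bit MULT32 computes exactly. The cheap
   variant simply discards low bits of both operands before one 32-bit
   multiply, trading precision for portability and speed. */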
/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be beneficial
 * depending on the architecture and compiler.
 */
#define MB()
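
/* For reference (an assumption, not part of the original file): on GCC a
   pure compiler-level barrier could be expressed as below; it forbids the
   compiler from caching memory values in registers across the call site
   without emitting any instruction. */
#if 0 /* illustrative sketch, not compiled */
#define MB() __asm__ volatile ("" ::: "memory")
#endif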
/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86. On Intel it is better
 * to let the compiler reload registers directly from original memory by
 * using macros.
 */

/* replaced XPROD32 with a macro to avoid memory reference
   _x, _y are the results (must be l-values) */
#define XPROD32(_a, _b, _t, _v, _x, _y)  \
  { (_x)=MULT32(_a,_t)+MULT32(_b,_v);    \
    (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
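
/* What XPROD32 computes (a note, not in the original): with (_t,_v) =
   (cos w, sin w) it is the complex product (_a + j*_b) * (_t - j*_v),
   i.e. a rotation by -w, which is the twiddle step used by the MDCT
   butterflies in mdct.c. */
#if 0 /* illustrative sketch, not compiled */
static void rotate_example(int32_t a, int32_t b,
                           int32_t cos_w, int32_t sin_w)
{
    int32_t re, im;
    XPROD32(a, b, cos_w, sin_w, re, im); /* re/im must be l-values */
}
#endif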
#ifdef __i386__

#define XPROD31(_a, _b, _t, _v, _x, _y)   \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);    \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y)  \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);    \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else
static inline void XPROD31(int32_t  a, int32_t  b,
                           int32_t  t, int32_t  v,
                           int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(int32_t  a, int32_t  b,
                            int32_t  t, int32_t  v,
                            int32_t *x, int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}
#endif
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

static inline
void vect_add(int32_t *x, int32_t *y, int n)
{
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}

static inline
void vect_copy(int32_t *x, int32_t *y, int n)
{
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}

static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}

static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
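
/* Usage sketch (an assumption about intent, based on how overlap-add
   windowing is usually done in Tremor): vect_mult_fw walks the window
   table forward and vect_mult_bw walks it backward, so a single
   half-window table can shape both the rising and the falling slopes. */
#if 0 /* illustrative sketch, not compiled */
static void window_example(int32_t *block, int32_t *half_window, int n)
{
    vect_mult_fw(block, half_window, n);              /* rising slope  */
    vect_mult_bw(block + n, half_window + n - 1, n);  /* falling slope */
}
#endif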
#endif

#endif
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline int32_t CLIP_TO_15(int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}

#endif
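
/* How the branchless clip works (explanatory note, not in the original):
   (x<=32767) evaluates to 1 or 0, so ((x<=32767)-1) is 0 when x is in
   range and all ones when x overflows; the mask then subtracts the excess
   (x-32767) only in the overflow case, clamping to 32767. The second line
   mirrors this for the -32768 underflow bound. */
#if 0 /* illustrative sketch, not compiled */
#include <assert.h>
static void clip_example(void)
{
    assert(CLIP_TO_15(40000)  == 32767);  /* clamped high   */
    assert(CLIP_TO_15(-40000) == -32768); /* clamped low    */
    assert(CLIP_TO_15(1234)   == 1234);   /* passed through */
}
#endif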
static inline int32_t VFLOAT_MULT(int32_t a,int32_t ap,
                                  int32_t b,int32_t bp,
                                  int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
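
/* The VFLOAT_* helpers implement a crude software floating point (a note,
   not in the original): a value is a 32-bit mantissa plus a separate
   binary exponent, i.e. mantissa * 2^exponent. MULT32 drops the low 32
   bits of the product, so the result exponent gains ap+bp+32 to
   compensate. */
#if 0 /* illustrative sketch, not compiled */
static void vfloat_example(void)
{
    int32_t p;
    /* (2^30 * 2^0) * (2^30 * 2^0) = 2^60:
       mantissa 2^28, exponent 0+0+32, and 2^28 * 2^32 == 2^60 */
    int32_t m = VFLOAT_MULT(0x40000000, 0, 0x40000000, 0, &p);
    /* here m == 0x10000000 and p == 32 */
    (void)m;
}
#endif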
/*static inline int32_t VFLOAT_MULTI(int32_t a,int32_t ap,
                                     int32_t i,
                                     int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
*/
static inline int32_t VFLOAT_ADD(int32_t a,int32_t ap,
                                 int32_t b,int32_t bp,
                                 int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
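
/* What the tail of VFLOAT_ADD does (explanatory note, not in the
   original): after aligning both mantissas to the larger exponent plus
   one guard bit, the sum is renormalized once if the top two bits are
   equal (both set for a negative value, both clear for a positive one):
   the mantissa shifts left and the exponent decrements, recovering that
   bit of precision. The rounding term (1<<(shift-1)) rounds the smaller
   operand to nearest instead of truncating it. */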
#ifdef __GNUC__
#if __GNUC__ >= 3
#define EXPECT(a, b) __builtin_expect((a), (b))
#else
#define EXPECT(a, b) (a)
#endif
#else
#define EXPECT(a, b) (a)
#endif
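
/* Usage sketch (illustrative, not part of the original header): EXPECT
   wraps GCC's __builtin_expect so branch hints degrade to a plain
   expression on compilers that lack it. */
#if 0 /* illustrative sketch, not compiled */
static int clamp_example(int x)
{
    /* hint to the compiler: overflow is the rare case */
    if (EXPECT(x > 32767, 0))
        return 32767;
    return x;
}
#endif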
#endif