Added backend & sample for Android
[openal-soft/android/lowlatency.git] / android / jni / tremolo / misc.h
blobd0a522a39215c2aee17c32a4d8d59dc7c6ddb5c8
1 /********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2003 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
14 function: miscellaneous math and prototypes
16 ********************************************************************/
18 #ifndef _V_RANDOM_H_
19 #define _V_RANDOM_H_
20 #include "ivorbiscodec.h"
21 #include "os_types.h"
/*#define _VDBG_GRAPHFILE "_0.m"*/

#ifdef _VDBG_GRAPHFILE
/* Debug allocator hooks: every allocation/free is routed through
   _VDBG_malloc/_VDBG_free together with the call site's file and line,
   so leaks and double frees can be traced.  Only active when
   _VDBG_GRAPHFILE is defined above. */
extern void *_VDBG_malloc(void *ptr,long bytes,char *file,long line);
extern void _VDBG_free(void *ptr,char *file,long line);

#undef _ogg_malloc
#undef _ogg_calloc
#undef _ogg_realloc
#undef _ogg_free

#define _ogg_malloc(x) _VDBG_malloc(NULL,(x),__FILE__,__LINE__)
/* NOTE(review): (x)*(y) is not checked for overflow, unlike real calloc. */
#define _ogg_calloc(x,y) _VDBG_malloc(NULL,(x)*(y),__FILE__,__LINE__)
#define _ogg_realloc(x,y) _VDBG_malloc((x),(y),__FILE__,__LINE__)
#define _ogg_free(x) _VDBG_free((x),__FILE__,__LINE__)
#endif
41 #include "asm_arm.h"
43 #ifndef _V_WIDE_MATH
44 #define _V_WIDE_MATH
46 #ifndef _LOW_ACCURACY_
47 /* 64 bit multiply */
49 #include <sys/types.h>
51 #if BYTE_ORDER==LITTLE_ENDIAN
52 union magic {
53 struct {
54 ogg_int32_t lo;
55 ogg_int32_t hi;
56 } halves;
57 ogg_int64_t whole;
59 #endif
61 #if BYTE_ORDER==BIG_ENDIAN
62 union magic {
63 struct {
64 ogg_int32_t hi;
65 ogg_int32_t lo;
66 } halves;
67 ogg_int64_t whole;
69 #endif
71 static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
72 union magic magic;
73 magic.whole = (ogg_int64_t)x * y;
74 return magic.halves.hi;
77 static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
78 return MULT32(x,y)<<1;
81 static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
82 union magic magic;
83 magic.whole = (ogg_int64_t)x * y;
84 return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
87 #else
88 /* 32 bit multiply, more portable but less accurate */
91 * Note: Precision is biased towards the first argument therefore ordering
92 * is important. Shift values were chosen for the best sound quality after
93 * many listening tests.
97 * For MULT32 and MULT31: The second argument is always a lookup table
98 * value already preshifted from 31 to 8 bits. We therefore take the
99 * opportunity to save on text space and use unsigned char for those
100 * tables in this case.
103 static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
104 return (x >> 9) * y; /* y preshifted >>23 */
107 static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
108 return (x >> 8) * y; /* y preshifted >>23 */
111 static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
112 return (x >> 6) * y; /* y preshifted >>9 */
115 #endif
118 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be beneficial
120 * depending on the architecture and compiler.
/* No-op on generic builds; presumably overridden with a real barrier by
   asm_arm.h where beneficial -- see the comment above. */
#define MB()
125 * The XPROD functions are meant to optimize the cross products found all
126 * over the place in mdct.c by forcing memory operation ordering to avoid
127 * unnecessary register reloads as soon as memory is being written to.
128 * However this is only beneficial on CPUs with a sane number of general
129 * purpose registers which exclude the Intel x86. On Intel, better let the
130 * compiler actually reload registers directly from original memory by using
131 * macros.
134 #ifdef __i386__
/* x86 variants as macros so the compiler reloads operands directly from
   memory (see the rationale above).  NOTE: each argument is evaluated
   more than once -- do not pass expressions with side effects. */
#define XPROD32(_a, _b, _t, _v, _x, _y)		\
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v);		\
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y)		\
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);		\
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y)	\
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);		\
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
146 #else
148 static inline void XPROD32(ogg_int32_t a, ogg_int32_t b,
149 ogg_int32_t t, ogg_int32_t v,
150 ogg_int32_t *x, ogg_int32_t *y)
152 *x = MULT32(a, t) + MULT32(b, v);
153 *y = MULT32(b, t) - MULT32(a, v);
156 static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
157 ogg_int32_t t, ogg_int32_t v,
158 ogg_int32_t *x, ogg_int32_t *y)
160 *x = MULT31(a, t) + MULT31(b, v);
161 *y = MULT31(b, t) - MULT31(a, v);
164 static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
165 ogg_int32_t t, ogg_int32_t v,
166 ogg_int32_t *x, ogg_int32_t *y)
168 *x = MULT31(a, t) - MULT31(b, v);
169 *y = MULT31(b, t) + MULT31(a, v);
172 #endif
174 #endif
176 #ifndef _V_CLIP_MATH
177 #define _V_CLIP_MATH
179 static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
180 int ret=x;
181 ret-= ((x<=32767)-1)&(x-32767);
182 ret-= ((x>=-32768)-1)&(x+32768);
183 return(ret);
186 #endif
188 #endif