Delete some duplicate inline asm macros since they now live in the codeclib.
[kugel-rb.git] / apps / codecs / libtremor / asm_arm.h

/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#ifdef _ARM_ASSEM_

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  /* 32x32->64 signed multiply, keeping only the high word */
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y) );
  return(hi);
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  /* recombine the high and low product words to give (x*y)>>15 */
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs %0, %0, lsr #15\n\t"
               "adc %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
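
/* Reference-only sketches (hypothetical helpers, not part of the original
   header) of what the asm above computes, assuming ogg_int64_t is available
   from os_types.h as in stock Tremor. */
static inline ogg_int32_t MULT32_ref(ogg_int32_t x, ogg_int32_t y) {
  return (ogg_int32_t)(((ogg_int64_t)x * y) >> 32);   /* high word only */
}
static inline ogg_int32_t MULT31_ref(ogg_int32_t x, ogg_int32_t y) {
  return MULT32_ref(x, y) << 1;                       /* Q31 x Q31 -> Q31 */
}
static inline ogg_int32_t MULT31_SHIFT15_ref(ogg_int32_t x, ogg_int32_t y) {
  /* (x*y)>>15; the asm also rounds via the carry out of the low word */
  return (ogg_int32_t)(((ogg_int64_t)x * y + (1 << 14)) >> 15);
}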

#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* asm versions of vector operations for block.c, window.c */
/* SOME IMPORTANT NOTES: this implementation of vect_mult_bw does
   NOT do a final shift, meaning that the result of vect_mult_bw is
   only 31 bits not 32.  This is so that we can do the shift in-place
   in vect_add_xxxx instead to save one instruction for each mult on arm */
static inline
void vect_add_right_left(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  /* first arg is right subframe of previous frame and second arg
     is left subframe of current frame.  overlap left onto right overwriting
     the right subframe */

  do{
    asm volatile (
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r4, r0, lsl #1;"
                  "add r1, r5, r1, lsl #1;"
                  "add r2, r6, r2, lsl #1;"
                  "add r3, r7, r3, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r4, r0, lsl #1;"
                  "add r1, r5, r1, lsl #1;"
                  "add r2, r6, r2, lsl #1;"
                  "add r3, r7, r3, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 8;
  } while (n);
}

static inline
void vect_add_left_right(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  /* first arg is left subframe of current frame and second arg
     is right subframe of previous frame.  overlap right onto left overwriting
     the LEFT subframe */
  do{
    asm volatile (
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4, lsl #1;"
                  "add r1, r1, r5, lsl #1;"
                  "add r2, r2, r6, lsl #1;"
                  "add r3, r3, r7, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  "ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4, lsl #1;"
                  "add r1, r1, r5, lsl #1;"
                  "add r2, r2, r6, lsl #1;"
                  "add r3, r3, r7, lsl #1;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 8;
  } while (n);
}
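
/* Reference-only sketch (hypothetical helper, not part of the original
   header).  In both overlap-adds the previous frame's right subframe -- the
   vect_mult_bw output, still one bit short of full scale per the note
   above -- is shifted left as it is accumulated: vect_add_right_left shifts
   x[], vect_add_left_right is the mirror image and shifts y[]. */
static inline void vect_add_right_left_ref(ogg_int32_t *x, const ogg_int32_t *y,
                                           int n)
{
  int i;
  for (i = 0; i < n; i++)
    x[i] = (x[i] << 1) + y[i];   /* apply the deferred <<1 while adding */
}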

static inline
void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  /* Note, mult_fw uses MULT31 */
  do{
    asm volatile (
                  "ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r0, r4, r0;"
                  "mov r0, r0, lsl #1;"
                  "smull r8, r1, r5, r1;"
                  "mov r1, r1, lsl #1;"
                  "smull r8, r2, r6, r2;"
                  "mov r2, r2, lsl #1;"
                  "smull r8, r3, r7, r3;"
                  "mov r3, r3, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8",
                      "memory" );
    n -= 4;
  } while (n);
}

static inline
void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  /* NOTE mult_bw uses MULT_32 i.e. doesn't shift result left at end */
  /* On ARM, we can do the shift at the same time as the overlap-add */
  do{
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r0, r7, r0;"
                  "smull r7, r1, r6, r1;"
                  "smull r6, r2, r5, r2;"
                  "smull r5, r3, r4, r3;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8",
                      "memory" );
    n -= 4;
  } while (n);
}
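
/* Reference-only sketches (hypothetical helpers, not part of the original
   header): the forward multiply is a plain MULT31 over the rising window; the
   backward multiply walks the window downward and keeps only the MULT32 high
   word, leaving the <<1 to the vect_add_* routines as noted above. */
static inline void vect_mult_fw_ref(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  int i;
  for (i = 0; i < n; i++)
    data[i] = MULT31(data[i], window[i]);
}
static inline void vect_mult_bw_ref(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  int i;
  for (i = 0; i < n; i++)
    data[i] = MULT32(data[i], window[-i]);  /* 31-bit result, shift deferred */
}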

static inline void vect_copy(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  memcpy(x,y,n*sizeof(ogg_int32_t));
}

#endif

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int tmp;
  asm volatile("subs %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}

#endif
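
/* Reference-only sketch (hypothetical helper, not part of the original
   header): clamp to the signed 16-bit range.  The asm above writes the low
   clamp as the constant 0x8000, the same bit pattern in the low 16 bits. */
static inline ogg_int32_t CLIP_TO_15_ref(ogg_int32_t x)
{
  if (x >  32767) return  32767;
  if (x < -32768) return -32768;
  return x;
}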

#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM

static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp,ogg_int32_t wi,
                                ogg_int32_t m){

  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov r0,%3;"
      "mov r1,%5,asr#1;"
      "add r0,r0,r1,lsl#3;"
      "1:"

      "ldmdb r0!,{r1,r3};"
      "subs r1,r1,%4;"          //ilsp[j]-wi
      "rsbmi r1,r1,#0;"         //labs(ilsp[j]-wi)
      "umull %0,r2,r1,%0;"      //qi*=labs(ilsp[j]-wi)

      "subs r1,r3,%4;"          //ilsp[j+1]-wi
      "rsbmi r1,r1,#0;"         //labs(ilsp[j+1]-wi)
      "umull %1,r3,r1,%1;"      //pi*=labs(ilsp[j+1]-wi)

      "cmn r2,r3;"              // shift down 16?
      "beq 0f;"
      "add %2,%2,#16;"
      "mov %0,%0,lsr #16;"
      "orr %0,%0,r2,lsl #16;"
      "mov %1,%1,lsr #16;"
      "orr %1,%1,r3,lsl #16;"
      "0:"
      "cmp r0,%3;\n"
      "bhi 1b;\n"

      // odd filter asymmetry
      "ands r0,%5,#1;\n"
      "beq 2f;\n"
      "add r0,%3,%5,lsl#2;\n"

      "ldr r1,[r0,#-4];\n"
      "mov r0,#0x4000;\n"

      "subs r1,r1,%4;\n"        //ilsp[j]-wi
      "rsbmi r1,r1,#0;\n"       //labs(ilsp[j]-wi)
      "umull %0,r2,r1,%0;\n"    //qi*=labs(ilsp[j]-wi)
      "umull %1,r3,r0,%1;\n"    //pi*=labs(ilsp[j+1]-wi)

      "cmn r2,r3;\n"            // shift down 16?
      "beq 2f;\n"
      "add %2,%2,#16;\n"
      "mov %0,%0,lsr #16;\n"
      "orr %0,%0,r2,lsl #16;\n"
      "mov %1,%1,lsr #16;\n"
      "orr %1,%1,r3,lsl #16;\n"

      //qi=(pi>>shift)*labs(ilsp[j]-wi);
      //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
      //qexp+=shift;

      /* normalize to max 16 sig figs */
      "2:"
      "mov r2,#0;"
      "orr r1,%0,%1;"
      "tst r1,#0xff000000;"
      "addne r2,r2,#8;"
      "movne r1,r1,lsr #8;"
      "tst r1,#0x00f00000;"
      "addne r2,r2,#4;"
      "movne r1,r1,lsr #4;"
      "tst r1,#0x000c0000;"
      "addne r2,r2,#2;"
      "movne r1,r1,lsr #2;"
      "tst r1,#0x00020000;"
      "addne r2,r2,#1;"
      "movne r1,r1,lsr #1;"
      "tst r1,#0x00010000;"
      "addne r2,r2,#1;"
      "mov %0,%0,lsr r2;"
      "mov %1,%1,lsr r2;"
      "add %2,%2,r2;"

      : "+r"(qi),"+r"(pi),"+r"(qexp)
      : "r"(ilsp),"r"(wi),"r"(m)
      : "r0","r1","r2","r3","cc");

  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}
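
/* Reference-only sketch (hypothetical helper, not part of the original
   header) of the loop above: accumulate products of labs(ilsp[j]-wi) into qi
   (even j) and pi (odd j), dropping 16 bits whenever either running product
   overflows 32 bits and tracking the total scaling in qexp, then normalize
   both down to at most 16 significant bits.  The asm iterates the pairs in
   reverse order; a 64-bit unsigned type (GCC's unsigned long long) is assumed
   here. */
static inline void lsp_loop_ref(ogg_uint32_t *qip, ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp, ogg_int32_t wi,
                                ogg_int32_t m)
{
  ogg_uint32_t qi = *qip, pi = *pip;
  ogg_int32_t qexp = *qexpp;
  ogg_int32_t j;

  for (j = 0; j + 1 < m; j += 2) {
    ogg_int32_t dq = ilsp[j]   - wi;
    ogg_int32_t dp = ilsp[j+1] - wi;
    unsigned long long q, p;
    if (dq < 0) dq = -dq;
    if (dp < 0) dp = -dp;
    q = (unsigned long long)qi * (ogg_uint32_t)dq;
    p = (unsigned long long)pi * (ogg_uint32_t)dp;
    if ((q >> 32) | (p >> 32)) { q >>= 16; p >>= 16; qexp += 16; }
    qi = (ogg_uint32_t)q;
    pi = (ogg_uint32_t)p;
  }

  if (m & 1) {                  /* odd filter order: one extra term, pi gets 0x4000 */
    ogg_int32_t dq = ilsp[m-1] - wi;
    unsigned long long q, p;
    if (dq < 0) dq = -dq;
    q = (unsigned long long)qi * (ogg_uint32_t)dq;
    p = (unsigned long long)pi * 0x4000u;
    if ((q >> 32) | (p >> 32)) { q >>= 16; p >>= 16; qexp += 16; }
    qi = (ogg_uint32_t)q;
    pi = (ogg_uint32_t)p;
  }

  /* normalize to at most 16 significant bits, as the asm's final block does */
  while ((qi | pi) & 0xffff0000u) { qi >>= 1; pi >>= 1; qexp++; }

  *qip = qi;
  *pip = pi;
  *qexpp = qexp;
}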

static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst %0,#0x0000ff00;"
      "moveq %0,%0,lsl #8;"
      "subeq %1,%1,#8;"
      "tst %0,#0x0000f000;"
      "moveq %0,%0,lsl #4;"
      "subeq %1,%1,#4;"
      "tst %0,#0x0000c000;"
      "moveq %0,%0,lsl #2;"
      "subeq %1,%1,#2;"
      "tst %0,#0x00008000;"
      "moveq %0,%0,lsl #1;"
      "subeq %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)
      :
      : "cc");

  *qip=qi;
  *qexpp=qexp;
}
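
/* Reference-only sketch (hypothetical helper, not part of the original
   header): shift qi up until bit 15 is set (it holds at most 16 significant
   bits after lsp_loop_asm), decrementing qexp to match. */
static inline void lsp_norm_ref(ogg_uint32_t *qip, ogg_int32_t *qexpp)
{
  ogg_uint32_t qi = *qip;
  ogg_int32_t qexp = *qexpp;
  while (qi && !(qi & 0x8000)) {  /* the qi check just keeps the sketch terminating */
    qi <<= 1;
    qexp--;
  }
  *qip = qi;
  *qexpp = qexp;
}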

#endif
#endif