/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#ifdef _ARM_ASSEM_

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH
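
/* MULT32(x, y): signed 32x32->64 multiply, returning only the top 32 bits
   of the product, i.e. ((ogg_int64_t)x * y) >> 32.  smull leaves the low
   half in a scratch register that is simply discarded. */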
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
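
/* MULT31(x, y): Q31 fixed-point multiply, ((ogg_int64_t)x * y) >> 31 with
   the lowest product bit dropped (the MULT32 result is shifted back up). */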
static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}
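
/* MULT31_SHIFT15(x, y): ((ogg_int64_t)x * y) >> 15, rounded.  The movs
   leaves bit 14 of the low word in the carry flag, and the adc folds that
   rounding bit into the recombined high:low result. */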
static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs  %0, %0, lsr #15\n\t"
               "adc   %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
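
/* MB(): compiler-level memory barrier only.  The empty asm with a "memory"
   clobber keeps the compiler from reordering loads and stores across it;
   no hardware barrier instruction is emitted. */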
#define MB() asm volatile ("" : : : "memory")
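
/* XPROD32(a, b, t, v, x, y): complex cross product,
     x = (a*t + b*v) >> 32,   y = (b*t - a*v) >> 32,
   built from 64-bit multiply-accumulates.  Note that 'a' is written back
   negated by the macro. */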
#define XPROD32(a, b, t, v, x, y) \
{ \
  long l; \
  asm( "smull %0, %1, %4, %6\n\t" \
       "smlal %0, %1, %5, %7\n\t" \
       "rsb   %3, %4, #0\n\t" \
       "smull %0, %2, %5, %6\n\t" \
       "smlal %0, %2, %3, %7" \
       : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
       : "cc" ); \
}
static inline void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
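
/* XNPROD31: the negated cross product, roughly (a sketch, up to low-bit
   rounding, not part of the original file):
     *x = MULT31(a, t) - MULT31(b, v);
     *y = MULT31(b, t) + MULT31(a, v);
*/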
static inline void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
                            ogg_int32_t  t, ogg_int32_t  v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "rsb   %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* asm versions of vector operations for block.c, window.c */
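
/* vect_add: x[i] += y[i] for 0 <= i < n.  The asm loop handles four words
   per iteration with ldmia/stmia; the plain C tail finishes any remainder. */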
static inline
void vect_add(ogg_int32_t *x, ogg_int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4;"
                  "add r1, r1, r5;"
                  "add r2, r2, r6;"
                  "add r3, r3, r7;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 4;
  }
  /* add final elements */
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}
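
/* vect_copy: copy n 32-bit words from y to x, four per iteration, with a
   scalar tail loop for the remainder. */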
static inline
void vect_copy(ogg_int32_t *x, ogg_int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "memory");
    n -= 4;
  }
  /* copy final elements */
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}
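
/* vect_mult_fw: data[i] = MULT31(data[i], window[i]) with the window read
   forwards.  Each smull/mov pair is one Q31 multiply, done four at a time. */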
static inline
void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r4;"
                  "mov   r0, r9, lsl #1;"
                  "smull r8, r9, r1, r5;"
                  "mov   r1, r9, lsl #1;"
                  "smull r8, r9, r2, r6;"
                  "mov   r2, r9, lsl #1;"
                  "smull r8, r9, r3, r7;"
                  "mov   r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory", "cc");
    n -= 4;
  }
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
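
/* vect_mult_bw: the same multiply, but the window pointer walks backwards,
   i.e. data[i] = MULT31(data[i], window[-i]); ldmda loads the four window
   values in descending order. */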
static inline
void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r7;"
                  "mov   r0, r9, lsl #1;"
                  "smull r8, r9, r1, r6;"
                  "mov   r1, r9, lsl #1;"
                  "smull r8, r9, r2, r5;"
                  "mov   r2, r9, lsl #1;"
                  "smull r8, r9, r3, r4;"
                  "mov   r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory", "cc");
    n -= 4;
  }
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}

#endif

#endif
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH
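
/* CLIP_TO_15: saturate x to the signed 16-bit output range.  Values above
   32767 become 0x7fff; values below -32768 become 0x8000 (i.e. -32768 once
   truncated to 16 bits). */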
static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int tmp;
  asm volatile("subs %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}

#endif
#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM
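
/* lsp_loop_asm: inner loop of the LSP-to-curve computation.  For each pair
   of line spectral pairs it accumulates
     qi *= labs(ilsp[j]   - wi);
     pi *= labs(ilsp[j+1] - wi);
   shifting both products down by 16 (and adding 16 to the shared exponent
   qexp) whenever either high word becomes non-zero, handling an odd filter
   order m separately, and finally renormalizing so qi and pi fit in at most
   16 significant bits. */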
static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp,ogg_int32_t wi,
                                ogg_int32_t m){

  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov     r0,%3;"
      "mov     r1,%5,asr#1;"
      "add     r0,r0,r1,lsl#3;"
      "1:"

      "ldmdb   r0!,{r1,r3};"
      "subs    r1,r1,%4;"          //ilsp[j]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;"       //qi*=labs(ilsp[j]-wi)

      "subs    r1,r3,%4;"          //ilsp[j+1]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j+1]-wi)
      "umull   %1,r3,r1,%1;"       //pi*=labs(ilsp[j+1]-wi)

      "cmn     r2,r3;"             // shift down 16?
      "beq     0f;"
      "add     %2,%2,#16;"
      "mov     %0,%0,lsr #16;"
      "orr     %0,%0,r2,lsl #16;"
      "mov     %1,%1,lsr #16;"
      "orr     %1,%1,r3,lsl #16;"
      "0:"
      "cmp     r0,%3;\n"
      "bhi     1b;\n"

      // odd filter asymmetry
      "ands    r0,%5,#1;\n"
      "beq     2f;\n"
      "add     r0,%3,%5,lsl#2;\n"

      "ldr     r1,[r0,#-4];\n"
      "mov     r0,#0x4000;\n"

      "subs    r1,r1,%4;\n"        //ilsp[j]-wi
      "rsbmi   r1,r1,#0;\n"        //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;\n"     //qi*=labs(ilsp[j]-wi)
      "umull   %1,r3,r0,%1;\n"     //pi*=labs(ilsp[j+1]-wi)

      "cmn     r2,r3;\n"           // shift down 16?
      "beq     2f;\n"
      "add     %2,%2,#16;\n"
      "mov     %0,%0,lsr #16;\n"
      "orr     %0,%0,r2,lsl #16;\n"
      "mov     %1,%1,lsr #16;\n"
      "orr     %1,%1,r3,lsl #16;\n"

      //qi=(pi>>shift)*labs(ilsp[j]-wi);
      //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
      //qexp+=shift;

      /* normalize to max 16 sig figs */
      "2:"
      "mov     r2,#0;"
      "orr     r1,%0,%1;"
      "tst     r1,#0xff000000;"
      "addne   r2,r2,#8;"
      "movne   r1,r1,lsr #8;"
      "tst     r1,#0x00f00000;"
      "addne   r2,r2,#4;"
      "movne   r1,r1,lsr #4;"
      "tst     r1,#0x000c0000;"
      "addne   r2,r2,#2;"
      "movne   r1,r1,lsr #2;"
      "tst     r1,#0x00020000;"
      "addne   r2,r2,#1;"
      "movne   r1,r1,lsr #1;"
      "tst     r1,#0x00010000;"
      "addne   r2,r2,#1;"
      "mov     %0,%0,lsr r2;"
      "mov     %1,%1,lsr r2;"
      "add     %2,%2,r2;"

      : "+r"(qi),"+r"(pi),"+r"(qexp)
      : "r"(ilsp),"r"(wi),"r"(m)
      : "r0","r1","r2","r3","cc");

  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}
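
/* lsp_norm_asm: shift qi left until bit 15 is set, decrementing qexp by the
   same amount, so the value is normalized to 16 significant bits. */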
static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst     %0,#0x0000ff00;"
      "moveq   %0,%0,lsl #8;"
      "subeq   %1,%1,#8;"
      "tst     %0,#0x0000f000;"
      "moveq   %0,%0,lsl #4;"
      "subeq   %1,%1,#4;"
      "tst     %0,#0x0000c000;"
      "moveq   %0,%0,lsl #2;"
      "subeq   %1,%1,#2;"
      "tst     %0,#0x00008000;"
      "moveq   %0,%0,lsl #1;"
      "subeq   %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)
      :
      : "cc");

  *qip=qi;
  *qexpp=qexp;
}
#endif
#endif