/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.  *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS    *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.      *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002   *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                 *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH
static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}
static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs %0, %0, lsr #15\n\t"
               "adc %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
#define MB() asm volatile ("" : : : "memory")
#define XPROD32(a, b, t, v, x, y) \
{ \
  long l; \
  asm( "smull %0, %1, %4, %6\n\t" \
       "smlal %0, %1, %5, %7\n\t" \
       "rsb %3, %4, #0\n\t" \
       "smull %0, %2, %5, %6\n\t" \
       "smlal %0, %2, %3, %7" \
       : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
       : "cc" ); \
}
static inline void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
static inline void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
                            ogg_int32_t  t, ogg_int32_t  v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "rsb %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
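/* What the cross-product helpers compute, mirroring the generic C fallbacks
   (a sketch for reference, with MULT32/MULT31 as defined above):

     XPROD32(a,b,t,v, x,y):    x = MULT32(a,t) + MULT32(b,v);
                               y = MULT32(b,t) - MULT32(a,v);
     XPROD31(a,b,t,v, &x,&y):  x = MULT31(a,t) + MULT31(b,v);
                               y = MULT31(b,t) - MULT31(a,v);
     XNPROD31(a,b,t,v, &x,&y): x = MULT31(a,t) - MULT31(b,v);
                               y = MULT31(b,t) + MULT31(a,v);

   i.e. a fixed-point complex rotation of (a,b) by the twiddle (t,v) as used
   by the MDCT.  MB() is only a compiler barrier ordering the two stores. */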
/* asm versions of vector operations for block.c, window.c */
static inline void vect_add(ogg_int32_t *x, ogg_int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4;"
                  "add r1, r1, r5;"
                  "add r2, r2, r6;"
                  "add r3, r3, r7;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 4;
  }
  /* add final elements */
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}
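/* For reference, vect_add is just an element-wise add; the asm above only
   unrolls it four elements at a time through r0-r7.  A portable sketch
   (hypothetical helper name, not part of this header):

     static inline void vect_add_c(ogg_int32_t *x, ogg_int32_t *y, int n){
       while(n-- > 0) *x++ += *y++;
     }
*/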
static inline void vect_copy(ogg_int32_t *x, ogg_int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "memory");
    n -= 4;
  }
  /* copy final elements */
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}
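/* vect_copy is a plain forward block copy; assuming the regions do not
   overlap, it behaves like memcpy(x, y, n*sizeof(ogg_int32_t)) from
   <string.h>, with the asm above simply moving four words per iteration. */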
static inline void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r4;"
                  "mov r0, r9, lsl #1;"
                  "smull r8, r9, r1, r5;"
                  "mov r1, r9, lsl #1;"
                  "smull r8, r9, r2, r6;"
                  "mov r2, r9, lsl #1;"
                  "smull r8, r9, r3, r7;"
                  "mov r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory");
    n -= 4;
  }
  /* multiply any final elements */
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
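/* Each "smull r8, r9, rX, rY" / "mov rX, r9, lsl #1" pair above is MULT31()
   spelled out: keep the high word of the 64-bit product and double it.  A
   portable sketch of the whole routine (hypothetical name, not part of this
   header):

     static inline void vect_mult_fw_c(ogg_int32_t *data, LOOKUP_T *window, int n){
       while(n-- > 0){ *data = MULT31(*data, *window); data++; window++; }
     }
*/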
static inline void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r7;"
                  "mov r0, r9, lsl #1;"
                  "smull r8, r9, r1, r6;"
                  "mov r1, r9, lsl #1;"
                  "smull r8, r9, r2, r5;"
                  "mov r2, r9, lsl #1;"
                  "smull r8, r9, r3, r4;"
                  "mov r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory");
    n -= 4;
  }
  /* multiply any final elements */
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
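/* Same idea as vect_mult_fw, but the window is traversed backwards: "ldmda"
   loads the four coefficients at and below the current pointer, and the
   products are paired in reverse (r0*r7 ... r3*r4).  Scalar sketch
   (hypothetical name):

     static inline void vect_mult_bw_c(ogg_int32_t *data, LOOKUP_T *window, int n){
       while(n-- > 0){ *data = MULT31(*data, *window); data++; window--; }
     }
*/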
static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int tmp;
  asm volatile("subs %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}
#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM
static inline void lsp_loop_asm(ogg_uint32_t *qip, ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp, ogg_int32_t wi,
                                ogg_int32_t m){

  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov     r0,%3;"
      "mov     r1,%5,asr#1;"
      "add     r0,r0,r1,lsl#3;"
      "1:"

      "ldmdb   r0!,{r1,r3};"
      "subs    r1,r1,%4;"          //ilsp[j]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;"       //qi*=labs(ilsp[j]-wi)

      "subs    r1,r3,%4;"          //ilsp[j+1]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j+1]-wi)
      "umull   %1,r3,r1,%1;"       //pi*=labs(ilsp[j+1]-wi)

      "cmn     r2,r3;"             // shift down 16?
      "beq     0f;"
      "add     %2,%2,#16;"
      "mov     %0,%0,lsr #16;"
      "orr     %0,%0,r2,lsl #16;"
      "mov     %1,%1,lsr #16;"
      "orr     %1,%1,r3,lsl #16;"
      "0:"
      "cmp     r0,%3;\n"
      "bhi     1b;\n"

      // odd filter asymmetry
      "ands    r0,%5,#1;\n"
      "beq     2f;\n"
      "add     r0,%3,%5,lsl#2;\n"

      "ldr     r1,[r0,#-4];\n"
      "mov     r0,#0x4000;\n"

      "subs    r1,r1,%4;\n"        //ilsp[j]-wi
      "rsbmi   r1,r1,#0;\n"        //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;\n"     //qi*=labs(ilsp[j]-wi)
      "umull   %1,r3,r0,%1;\n"     //pi*=labs(ilsp[j+1]-wi)

      "cmn     r2,r3;\n"           // shift down 16?
      "beq     2f;\n"
      "add     %2,%2,#16;\n"
      "mov     %0,%0,lsr #16;\n"
      "orr     %0,%0,r2,lsl #16;\n"
      "mov     %1,%1,lsr #16;\n"
      "orr     %1,%1,r3,lsl #16;\n"

      //qi=(pi>>shift)*labs(ilsp[j]-wi);
      //pi=(qi>>shift)*labs(ilsp[j+1]-wi);

      /* normalize to max 16 sig figs */
      "2:"
      "mov     r2,#0;"
      "orr     r1,%0,%1;"
      "tst     r1,#0xff000000;"
      "addne   r2,r2,#8;"
      "movne   r1,r1,lsr #8;"
      "tst     r1,#0x00f00000;"
      "addne   r2,r2,#4;"
      "movne   r1,r1,lsr #4;"
      "tst     r1,#0x000c0000;"
      "addne   r2,r2,#2;"
      "movne   r1,r1,lsr #2;"
      "tst     r1,#0x00020000;"
      "addne   r2,r2,#1;"
      "movne   r1,r1,lsr #1;"
      "tst     r1,#0x00010000;"
      "addne   r2,r2,#1;"
      "mov     %0,%0,lsr r2;"
      "mov     %1,%1,lsr r2;"
      "add     %2,%2,r2;"

      : "+r"(qi),"+r"(pi),"+r"(qexp)
      : "r"(ilsp),"r"(wi),"r"(m)
      : "r0","r1","r2","r3","cc");

  *qip=qi;
  *pip=pi;
  *qexpp=qexp;
}
static inline void lsp_norm_asm(ogg_uint32_t *qip, ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst     %0,#0x0000ff00;"
      "moveq   %0,%0,lsl #8;"
      "subeq   %1,%1,#8;"
      "tst     %0,#0x0000f000;"
      "moveq   %0,%0,lsl #4;"
      "subeq   %1,%1,#4;"
      "tst     %0,#0x0000c000;"
      "moveq   %0,%0,lsl #2;"
      "subeq   %1,%1,#2;"
      "tst     %0,#0x00008000;"
      "moveq   %0,%0,lsl #1;"
      "subeq   %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)