/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH

#if ARM_ARCH >= 6
/* ARMv6 and later: smmul yields the high 32 bits of the product directly. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  int32_t hi;
  asm volatile("smmul %[hi], %[x], %[y] \n\t"
               : [hi] "=&r" (hi)
               : [x] "r" (x), [y] "r" (y) );
  return(hi);
}
#else
/* Older cores: form the full 64-bit product and keep only the high word. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3 \n\t"
               : "=&r"(lo),"=&r"(hi)
               : "r"(x),"r"(y) );
  return(hi);
}
#endif
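/* Added for clarity (not in the original file): a portable sketch of what
   both MULT32 variants above compute, assuming int64_t is available. */
static inline int32_t MULT32_ref(int32_t x, int32_t y) {
  /* high 32 bits of the signed 64-bit product */
  return (int32_t)(((int64_t)x * y) >> 32);
}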
static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x,y)<<1;
}
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs %0, %0, lsr #15\n\t"
               "adc %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "r"(x),"r"(y)
               : "cc" );
  return(hi);
}
#define XPROD32(a, b, t, v, x, y) \
{ \
  long l; \
  asm("smull %0, %1, %3, %5\n\t" \
      "rsb %2, %6, #0\n\t" \
      "smlal %0, %1, %4, %6\n\t" \
      "smull %0, %2, %3, %2\n\t" \
      "smlal %0, %2, %4, %5" \
      : "=&r" (l), "=&r" (x), "=&r" (y) \
      : "r" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
}
#if ARM_ARCH >= 6
/* These may yield a slightly different result from the macros below,
   because only the high 32 bits of each multiplication are accumulated,
   while the macros below use a 64-bit accumulator that is truncated to
   32 bits. */
#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
  int32_t x1, y1;\
  asm("smmul %[x1], %[t], %[a] \n\t"\
      "smmul %[y1], %[t], %[b] \n\t"\
      "smmla %[x1], %[v], %[b], %[x1] \n\t"\
      "smmls %[y1], %[v], %[a], %[y1] \n\t"\
      : [x1] "=&r" (x1), [y1] "=&r" (y1)\
      : [a] "r" (_a), [b] "r" (_b), [t] "r" (_t), [v] "r" (_v) );\
  _x = x1 << 1;\
  _y = y1 << 1;\
}
#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
  int32_t x1, y1;\
  asm("smmul %[x1], %[t], %[a] \n\t"\
      "smmul %[y1], %[t], %[b] \n\t"\
      "smmls %[x1], %[v], %[b], %[x1] \n\t"\
      "smmla %[y1], %[v], %[a], %[y1] \n\t"\
      : [x1] "=&r" (x1), [y1] "=&r" (y1)\
      : [a] "r" (_a), [b] "r" (_b), [t] "r" (_t), [v] "r" (_v) );\
  _x = x1 << 1;\
  _y = y1 << 1;\
}
#else
#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
  int32_t x1, y1, l;\
  asm("smull %0, %1, %5, %3\n\t"\
      "rsb %2, %3, #0\n\t"\
      "smlal %0, %1, %6, %4\n\t"\
      "smull %0, %2, %6, %2\n\t"\
      "smlal %0, %2, %5, %4"\
      : "=&r" (l), "=&r" (x1), "=&r" (y1)\
      : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
  _x = x1 << 1;\
  _y = y1 << 1;\
}
#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
  int32_t x1, y1, l;\
  asm("smull %0, %1, %5, %3\n\t"\
      "rsb %2, %4, #0\n\t"\
      "smlal %0, %1, %6, %2\n\t"\
      "smull %0, %2, %5, %4\n\t"\
      "smlal %0, %2, %6, %3"\
      : "=&r" (l), "=&r" (x1), "=&r" (y1)\
      : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
  _x = x1 << 1;\
  _y = y1 << 1;\
}
#endif
static inline void XPROD31(int32_t  a, int32_t  b,
                           int32_t  t, int32_t  v,
                           int32_t *x, int32_t *y)
{
  int32_t _x1, _y1;
  XPROD31_R(a, b, t, v, _x1, _y1);
  *x = _x1;
  *y = _y1;
}
static inline void XNPROD31(int32_t  a, int32_t  b,
                            int32_t  t, int32_t  v,
                            int32_t *x, int32_t *y)
{
  int32_t _x1, _y1;
  XNPROD31_R(a, b, t, v, _x1, _y1);
  *x = _x1;
  *y = _y1;
}
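/* Added for clarity (not in the original file): portable sketches of the
   two wrappers above, matching the 64-bit-accumulator macro variants
   (the ARMv6 variants may differ slightly, per the comment earlier). */
static inline void XPROD31_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                               int32_t *x, int32_t *y) {
  *x = (int32_t)((((int64_t)a * t + (int64_t)b * v) >> 32) << 1);
  *y = (int32_t)((((int64_t)b * t - (int64_t)a * v) >> 32) << 1);
}
static inline void XNPROD31_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                                int32_t *x, int32_t *y) {
  *x = (int32_t)((((int64_t)a * t - (int64_t)b * v) >> 32) << 1);
  *y = (int32_t)((((int64_t)b * t + (int64_t)a * v) >> 32) << 1);
}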
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* asm versions of vector operations for block.c, window.c */
static inline void vect_add(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4;"
                  "add r1, r1, r5;"
                  "add r2, r2, r6;"
                  "add r3, r3, r7;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 4;
  }
  /* add final elements */
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}
static inline void vect_copy(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "memory");
    n -= 4;
  }
  /* copy final elements */
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}
static inline void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  /* scale data[] by a Q31 window, walking the window forward */
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r4;"
                  "mov r0, r9, lsl #1;"
                  "smull r8, r9, r1, r5;"
                  "mov r1, r9, lsl #1;"
                  "smull r8, r9, r2, r6;"
                  "mov r2, r9, lsl #1;"
                  "smull r8, r9, r3, r7;"
                  "mov r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory" );
    n -= 4;
  }
  /* multiply final elements */
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
static inline void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  /* scale data[] by a Q31 window, walking the window backward */
  while (n>=4) {
    asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r7;"
                  "mov r0, r9, lsl #1;"
                  "smull r8, r9, r1, r6;"
                  "mov r1, r9, lsl #1;"
                  "smull r8, r9, r2, r5;"
                  "mov r2, r9, lsl #1;"
                  "smull r8, r9, r3, r4;"
                  "mov r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory" );
    n -= 4;
  }
  /* multiply final elements */
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
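/* Added for clarity (not in the original file): the scalar behaviour the
   unrolled loops above implement. vect_mult_fw walks the Q31 window
   forward; vect_mult_bw, sketched here, walks it backward from the
   element window points at. */
static inline void vect_mult_bw_ref(int32_t *data, int32_t *window, int n) {
  while (n > 0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}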
#endif  /* _V_VECT_OPS */

#endif  /* _V_WIDE_MATH */

/* not used anymore */
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline int32_t CLIP_TO_15(int32_t x) {
  int tmp;
  asm volatile("subs %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}

#endif
#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM