/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
20 #if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
23 static inline ogg_int32_t
MULT32(ogg_int32_t x
, ogg_int32_t y
) {
25 asm volatile("smull\t%0, %1, %2, %3"
32 static inline ogg_int32_t
MULT31(ogg_int32_t x
, ogg_int32_t y
) {
33 return MULT32(x
,y
)<<1;
36 static inline ogg_int32_t
MULT31_SHIFT15(ogg_int32_t x
, ogg_int32_t y
) {
38 asm volatile("smull %0, %1, %2, %3\n\t"
39 "movs %0, %0, lsr #15\n\t"
40 "adc %1, %0, %1, lsl #17\n\t"
/* Compiler memory barrier: prevents the compiler from reordering
   memory accesses across this point (no hardware barrier is emitted).
   Used below so *x is stored before *y, allowing x==y aliasing. */
#define MB() asm volatile ("" : : : "memory")
49 static inline void XPROD32(ogg_int32_t a
, ogg_int32_t b
,
50 ogg_int32_t t
, ogg_int32_t v
,
51 ogg_int32_t
*x
, ogg_int32_t
*y
)
54 asm( "smull %0, %1, %4, %6\n\t"
55 "smlal %0, %1, %5, %7\n\t"
57 "smull %0, %2, %5, %6\n\t"
58 "smlal %0, %2, %3, %7"
59 : "=&r" (l
), "=&r" (x1
), "=&r" (y1
), "=r" (a
)
60 : "3" (a
), "r" (b
), "r" (t
), "r" (v
)
67 static inline void XPROD31(ogg_int32_t a
, ogg_int32_t b
,
68 ogg_int32_t t
, ogg_int32_t v
,
69 ogg_int32_t
*x
, ogg_int32_t
*y
)
72 asm( "smull %0, %1, %4, %6\n\t"
73 "smlal %0, %1, %5, %7\n\t"
75 "smull %0, %2, %5, %6\n\t"
76 "smlal %0, %2, %3, %7"
77 : "=&r" (l
), "=&r" (x1
), "=&r" (y1
), "=r" (a
)
78 : "3" (a
), "r" (b
), "r" (t
), "r" (v
)
85 static inline void XNPROD31(ogg_int32_t a
, ogg_int32_t b
,
86 ogg_int32_t t
, ogg_int32_t v
,
87 ogg_int32_t
*x
, ogg_int32_t
*y
)
90 asm( "rsb %2, %4, #0\n\t"
91 "smull %0, %1, %3, %5\n\t"
92 "smlal %0, %1, %2, %6\n\t"
93 "smull %0, %2, %4, %5\n\t"
94 "smlal %0, %2, %3, %6"
95 : "=&r" (l
), "=&r" (x1
), "=&r" (y1
)
96 : "r" (a
), "r" (b
), "r" (t
), "r" (v
)
108 static inline ogg_int32_t
CLIP_TO_15(ogg_int32_t x
) {
110 asm volatile("subs %1, %0, #32768\n\t"
111 "movpl %0, #0x7f00\n\t"
112 "orrpl %0, %0, #0xff\n"
113 "adds %1, %0, #32768\n\t"
123 #ifndef _V_LSP_MATH_ASM
124 #define _V_LSP_MATH_ASM
126 static inline void lsp_loop_asm(ogg_uint32_t
*qip
,ogg_uint32_t
*pip
,
128 ogg_int32_t
*ilsp
,ogg_int32_t wi
,
131 ogg_uint32_t qi
=*qip
,pi
=*pip
;
132 ogg_int32_t qexp
=*qexpp
;
136 "add r0,r0,r1,lsl#3;"
140 "subs r1,r1,%4;" //ilsp[j]-wi
141 "rsbmi r1,r1,#0;" //labs(ilsp[j]-wi)
142 "umull %0,r2,r1,%0;" //qi*=labs(ilsp[j]-wi)
144 "subs r1,r3,%4;" //ilsp[j+1]-wi
145 "rsbmi r1,r1,#0;" //labs(ilsp[j+1]-wi)
146 "umull %1,r3,r1,%1;" //pi*=labs(ilsp[j+1]-wi)
148 "cmn r2,r3;" // shift down 16?
152 "orr %0,%0,r2,lsl #16;"
154 "orr %1,%1,r3,lsl #16;"
159 // odd filter assymetry
162 "add r0,%3,%5,lsl#2;\n"
167 "subs r1,r1,%4;\n" //ilsp[j]-wi
168 "rsbmi r1,r1,#0;\n" //labs(ilsp[j]-wi)
169 "umull %0,r2,r1,%0;\n" //qi*=labs(ilsp[j]-wi)
170 "umull %1,r3,r0,%1;\n" //pi*=labs(ilsp[j+1]-wi)
172 "cmn r2,r3;\n" // shift down 16?
175 "mov %0,%0,lsr #16;\n"
176 "orr %0,%0,r2,lsl #16;\n"
177 "mov %1,%1,lsr #16;\n"
178 "orr %1,%1,r3,lsl #16;\n"
180 //qi=(pi>>shift)*labs(ilsp[j]-wi);
181 //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
186 /* normalize to max 16 sig figs */
190 "tst r1,#0xff000000;"
192 "movne r1,r1,lsr #8;"
193 "tst r1,#0x00f00000;"
195 "movne r1,r1,lsr #4;"
196 "tst r1,#0x000c0000;"
198 "movne r1,r1,lsr #2;"
199 "tst r1,#0x00020000;"
201 "movne r1,r1,lsr #1;"
202 "tst r1,#0x00010000;"
208 : "+r"(qi
),"+r"(pi
),"+r"(qexp
)
209 : "r"(ilsp
),"r"(wi
),"r"(m
)
210 : "r0","r1","r2","r3","cc");
217 static inline void lsp_norm_asm(ogg_uint32_t
*qip
,ogg_int32_t
*qexpp
){
219 ogg_uint32_t qi
=*qip
;
220 ogg_int32_t qexp
=*qexpp
;
222 asm("tst %0,#0x0000ff00;"
223 "moveq %0,%0,lsl #8;"
225 "tst %0,#0x0000f000;"
226 "moveq %0,%0,lsl #4;"
228 "tst %0,#0x0000c000;"
229 "moveq %0,%0,lsl #2;"
231 "tst %0,#0x00008000;"
232 "moveq %0,%0,lsl #1;"
234 : "+r"(qi
),"+r"(qexp
)