#include "config.h"

#include <arm_neon.h>

#include "alMain.h"
#include "alu.h"
#include "hrtf.h"
#include "mixer_defs.h"

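/* A note on the fixed-point stepping used throughout this file: each
 * resampler tracks a sample position plus a fraction in FRACTIONBITS bits,
 * using the FRACTIONONE/FRACTIONMASK macros from the common mixer headers.
 * As an illustrative sketch (not a function in this file), one scalar step
 * is:
 *
 *     frac += increment;
 *     pos  += frac>>FRACTIONBITS;
 *     frac &= FRACTIONMASK;
 *
 * The NEON paths below do the same four output samples at a time, with
 * InitiatePositionArrays() (declared in the shared mixer code) filling in
 * the first four position/fraction pairs.
 */
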
const ALfloat *Resample_lerp_Neon(const InterpState* UNUSED(state),
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei numsamples)
{
    const int32x4_t increment4 = vdupq_n_s32(increment*4);
    const float32x4_t fracOne4 = vdupq_n_f32(1.0f/FRACTIONONE);
    const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
    alignas(16) ALint pos_[4];
    alignas(16) ALsizei frac_[4];
    int32x4_t frac4, pos4;
    ALint pos;
    ALsizei i;

    InitiatePositionArrays(frac, increment, frac_, pos_, 4);

    frac4 = vld1q_s32(frac_);
    pos4 = vld1q_s32(pos_);

    for(i = 0;numsamples-i > 3;i += 4)
    {
        const float32x4_t val1 = (float32x4_t){src[pos_[0]], src[pos_[1]], src[pos_[2]], src[pos_[3]]};
        const float32x4_t val2 = (float32x4_t){src[pos_[0]+1], src[pos_[1]+1], src[pos_[2]+1], src[pos_[3]+1]};

        /* val1 + (val2-val1)*mu */
        const float32x4_t r0 = vsubq_f32(val2, val1);
        const float32x4_t mu = vmulq_f32(vcvtq_f32_s32(frac4), fracOne4);
        const float32x4_t out = vmlaq_f32(val1, mu, r0);

        vst1q_f32(&dst[i], out);

        frac4 = vaddq_s32(frac4, increment4);
        pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
        frac4 = vandq_s32(frac4, fracMask4);

        vst1q_s32(pos_, pos4);
    }

    if(i < numsamples)
    {
        /* NOTE: These four elements represent the position *after* the last
         * four samples, so the lowest element is the next position to
         * resample.
         */
        pos = pos_[0];
        frac = vgetq_lane_s32(frac4, 0);
        do {
            dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));

            frac += increment;
            pos  += frac>>FRACTIONBITS;
            frac &= FRACTIONMASK;
        } while(++i < numsamples);
    }
    return dst;
}

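/* Rough scalar equivalent of the 4-tap resampler below, for reference only
 * (resample_fir4() and the sinc4 filter table come from the shared mixer
 * headers):
 *
 *     dst[i] = filter[frac][0]*src[pos  ] + filter[frac][1]*src[pos+1]
 *            + filter[frac][2]*src[pos+2] + filter[frac][3]*src[pos+3];
 *
 * The NEON version computes four such outputs per iteration, reducing each
 * coefficient*sample product vector to a single sum with pairwise adds
 * (vpadd_f32).
 */
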
const ALfloat *Resample_fir4_Neon(const InterpState *state,
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei numsamples)
{
    const ALfloat (*restrict filter)[4] = ASSUME_ALIGNED(state->sinc4.filter, 16);
    const int32x4_t increment4 = vdupq_n_s32(increment*4);
    const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
    alignas(16) ALint pos_[4];
    alignas(16) ALsizei frac_[4];
    int32x4_t frac4, pos4;
    ALint pos;
    ALsizei i;

    InitiatePositionArrays(frac, increment, frac_, pos_, 4);

    frac4 = vld1q_s32(frac_);
    pos4 = vld1q_s32(pos_);

    for(i = 0;numsamples-i > 3;i += 4)
    {
        const float32x4_t val0 = vld1q_f32(&src[pos_[0]]);
        const float32x4_t val1 = vld1q_f32(&src[pos_[1]]);
        const float32x4_t val2 = vld1q_f32(&src[pos_[2]]);
        const float32x4_t val3 = vld1q_f32(&src[pos_[3]]);
        float32x4_t k0 = vld1q_f32(filter[frac_[0]]);
        float32x4_t k1 = vld1q_f32(filter[frac_[1]]);
        float32x4_t k2 = vld1q_f32(filter[frac_[2]]);
        float32x4_t k3 = vld1q_f32(filter[frac_[3]]);
        float32x4_t out;

        /* Multiply the coefficients with the samples, then sum each tap
         * set with pairwise adds to get one output sample per lane.
         */
        k0 = vmulq_f32(k0, val0);
        k1 = vmulq_f32(k1, val1);
        k2 = vmulq_f32(k2, val2);
        k3 = vmulq_f32(k3, val3);
        k0 = vcombine_f32(vpadd_f32(vget_low_f32(k0), vget_high_f32(k0)),
                          vpadd_f32(vget_low_f32(k1), vget_high_f32(k1)));
        k2 = vcombine_f32(vpadd_f32(vget_low_f32(k2), vget_high_f32(k2)),
                          vpadd_f32(vget_low_f32(k3), vget_high_f32(k3)));
        out = vcombine_f32(vpadd_f32(vget_low_f32(k0), vget_high_f32(k0)),
                           vpadd_f32(vget_low_f32(k2), vget_high_f32(k2)));

        vst1q_f32(&dst[i], out);

        frac4 = vaddq_s32(frac4, increment4);
        pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
        frac4 = vandq_s32(frac4, fracMask4);

        vst1q_s32(pos_, pos4);
        vst1q_s32(frac_, frac4);
    }

    if(i < numsamples)
    {
        /* NOTE: These four elements represent the position *after* the last
         * four samples, so the lowest element is the next position to
         * resample.
         */
        pos = pos_[0];
        frac = frac_[0];
        do {
            dst[i] = resample_fir4(src[pos], src[pos+1], src[pos+2], src[pos+3], filter[frac]);

            frac += increment;
            pos  += frac>>FRACTIONBITS;
            frac &= FRACTIONMASK;
        } while(++i < numsamples);
    }
    return dst;
}

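/* The band-limited sinc resampler evaluates an m-tap filter per output
 * sample, interpolating between the stored coefficient rows as noted in the
 * inner loop:
 *
 *     f = (fil + sf*scd) + pf*(phd + sf*spd)
 *
 * where sf is the scale factor and pf the phase fraction for this sample.
 * The row layout implied by the m*pi*4 offset (four length-m rows per phase
 * index) is an assumption here; the tables themselves are built by the
 * shared bsinc setup code, not in this file.
 */
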
const ALfloat *Resample_bsinc_Neon(const InterpState *state,
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei dstlen)
{
    const ALfloat *const filter = state->bsinc.filter;
    const float32x4_t sf4 = vdupq_n_f32(state->bsinc.sf);
    const ALsizei m = state->bsinc.m;
    const float32x4_t *fil, *scd, *phd, *spd;
    ALsizei pi, i, j, offset;
    float32x4_t r4;
    ALfloat pf;

    src += state->bsinc.l;
    for(i = 0;i < dstlen;i++)
    {
        // Calculate the phase index and factor.
#define FRAC_PHASE_BITDIFF (FRACTIONBITS-BSINC_PHASE_BITS)
        pi = frac >> FRAC_PHASE_BITDIFF;
        pf = (frac & ((1<<FRAC_PHASE_BITDIFF)-1)) * (1.0f/(1<<FRAC_PHASE_BITDIFF));
#undef FRAC_PHASE_BITDIFF

        offset = m*pi*4;
        fil = ASSUME_ALIGNED(filter + offset, 16); offset += m;
        scd = ASSUME_ALIGNED(filter + offset, 16); offset += m;
        phd = ASSUME_ALIGNED(filter + offset, 16); offset += m;
        spd = ASSUME_ALIGNED(filter + offset, 16);

        // Apply the scale and phase interpolated filter.
        r4 = vdupq_n_f32(0.0f);
        {
            const float32x4_t pf4 = vdupq_n_f32(pf);
            for(j = 0;j < m;j+=4,fil++,scd++,phd++,spd++)
            {
                /* f = ((fil + sf*scd) + pf*(phd + sf*spd)) */
                const float32x4_t f4 = vmlaq_f32(
                    vmlaq_f32(*fil, sf4, *scd),
                    pf4, vmlaq_f32(*phd, sf4, *spd)
                );
                /* r += f*src */
                r4 = vmlaq_f32(r4, f4, vld1q_f32(&src[j]));
            }
        }
        r4 = vaddq_f32(r4, vcombine_f32(vrev64_f32(vget_high_f32(r4)),
                                        vrev64_f32(vget_low_f32(r4))));
        dst[i] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);

        frac += increment;
        src  += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}

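/* ApplyCoeffs accumulates a stereo HRIR into the circular Values buffer used
 * by the HRTF mixers pulled in from mixer_inc.c below. As a scalar sketch,
 * one tap does:
 *
 *     Values[(Offset+c)&HRIR_MASK][0] += Coeffs[c][0]*left;
 *     Values[(Offset+c)&HRIR_MASK][1] += Coeffs[c][1]*right;
 *
 * The NEON loop handles two taps per iteration with a single vmlaq_f32.
 */
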
static inline void ApplyCoeffs(ALsizei Offset, ALfloat (*restrict Values)[2],
                               const ALsizei IrSize,
                               const ALfloat (*restrict Coeffs)[2],
                               ALfloat left, ALfloat right)
{
    ALsizei c;
    float32x4_t leftright4;
    {
        float32x2_t leftright2 = vdup_n_f32(0.0);
        leftright2 = vset_lane_f32(left, leftright2, 0);
        leftright2 = vset_lane_f32(right, leftright2, 1);
        leftright4 = vcombine_f32(leftright2, leftright2);
    }

    Values = ASSUME_ALIGNED(Values, 16);
    Coeffs = ASSUME_ALIGNED(Coeffs, 16);
    for(c = 0;c < IrSize;c += 2)
    {
        const ALsizei o0 = (Offset+c)&HRIR_MASK;
        const ALsizei o1 = (o0+1)&HRIR_MASK;
        float32x4_t vals = vcombine_f32(vld1_f32((float32_t*)&Values[o0][0]),
                                        vld1_f32((float32_t*)&Values[o1][0]));
        float32x4_t coefs = vld1q_f32((float32_t*)&Coeffs[c][0]);

        vals = vmlaq_f32(vals, coefs, leftright4);

        vst1_f32((float32_t*)&Values[o0][0], vget_low_f32(vals));
        vst1_f32((float32_t*)&Values[o1][0], vget_high_f32(vals));
    }
}

#define MixHrtf MixHrtf_Neon
#define MixHrtfBlend MixHrtfBlend_Neon
#define MixDirectHrtf MixDirectHrtf_Neon
#include "mixer_inc.c"

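/* Mix_Neon mixes one input channel into OutChans output channels, fading
 * each channel's gain from CurrentGains[c] toward TargetGains[c] over
 * Counter samples. The ramped part is a plain linear fade; a scalar sketch
 * of one sample is:
 *
 *     OutBuffer[c][OutPos+pos] += data[pos]*gain;
 *     gain += step;   // step = (target - current) / Counter
 *
 * The vector path keeps four consecutive gains in gain4 and advances all of
 * them by 4*step per iteration.
 */
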
void Mix_Neon(const ALfloat *data, ALsizei OutChans, ALfloat (*restrict OutBuffer)[BUFFERSIZE],
              ALfloat *CurrentGains, const ALfloat *TargetGains, ALsizei Counter, ALsizei OutPos,
              ALsizei BufferSize)
{
    ALfloat gain, delta, step;
    float32x4_t gain4;
    ALsizei c;

    data = ASSUME_ALIGNED(data, 16);
    OutBuffer = ASSUME_ALIGNED(OutBuffer, 16);

    delta = (Counter > 0) ? 1.0f/(ALfloat)Counter : 0.0f;

    for(c = 0;c < OutChans;c++)
    {
        ALsizei pos = 0;
        gain = CurrentGains[c];
        step = (TargetGains[c] - gain) * delta;
        if(fabsf(step) > FLT_EPSILON)
        {
            ALsizei minsize = mini(BufferSize, Counter);
            /* Mix with applying gain steps in aligned multiples of 4. */
            if(minsize-pos > 3)
            {
                float32x4_t step4;
                gain4 = vsetq_lane_f32(gain, gain4, 0);
                gain4 = vsetq_lane_f32(gain + step, gain4, 1);
                gain4 = vsetq_lane_f32(gain + step + step, gain4, 2);
                gain4 = vsetq_lane_f32(gain + step + step + step, gain4, 3);
                step4 = vdupq_n_f32(step + step + step + step);
                do {
                    const float32x4_t val4 = vld1q_f32(&data[pos]);
                    float32x4_t dry4 = vld1q_f32(&OutBuffer[c][OutPos+pos]);
                    dry4 = vmlaq_f32(dry4, val4, gain4);
                    gain4 = vaddq_f32(gain4, step4);
                    vst1q_f32(&OutBuffer[c][OutPos+pos], dry4);
                    pos += 4;
                } while(minsize-pos > 3);
                /* NOTE: gain4 now represents the next four gains after the
                 * last four mixed samples, so the lowest element represents
                 * the next gain to apply.
                 */
                gain = vgetq_lane_f32(gain4, 0);
            }
            /* Mix with applying left over gain steps that aren't aligned multiples of 4. */
            for(;pos < minsize;pos++)
            {
                OutBuffer[c][OutPos+pos] += data[pos]*gain;
                gain += step;
            }
            if(pos == Counter)
                gain = TargetGains[c];
            CurrentGains[c] = gain;

            /* Mix until pos is aligned with 4 or the mix is done. */
            minsize = mini(BufferSize, (pos+3)&~3);
            for(;pos < minsize;pos++)
                OutBuffer[c][OutPos+pos] += data[pos]*gain;
        }

        if(!(fabsf(gain) > GAIN_SILENCE_THRESHOLD))
            continue;
        gain4 = vdupq_n_f32(gain);
        for(;BufferSize-pos > 3;pos += 4)
        {
            const float32x4_t val4 = vld1q_f32(&data[pos]);
            float32x4_t dry4 = vld1q_f32(&OutBuffer[c][OutPos+pos]);
            dry4 = vmlaq_f32(dry4, val4, gain4);
            vst1q_f32(&OutBuffer[c][OutPos+pos], dry4);
        }
        for(;pos < BufferSize;pos++)
            OutBuffer[c][OutPos+pos] += data[pos]*gain;
    }
}

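/* MixRow_Neon is the complementary operation: it sums InChans input rows
 * into a single output row with fixed (non-ramped) per-channel gains,
 * skipping channels whose gain is not above GAIN_SILENCE_THRESHOLD.
 */
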
void MixRow_Neon(ALfloat *OutBuffer, const ALfloat *Gains,
                 const ALfloat (*restrict data)[BUFFERSIZE],
                 ALsizei InChans, ALsizei InPos, ALsizei BufferSize)
{
    float32x4_t gain4;
    ALsizei c;

    data = ASSUME_ALIGNED(data, 16);
    OutBuffer = ASSUME_ALIGNED(OutBuffer, 16);

    for(c = 0;c < InChans;c++)
    {
        ALsizei pos = 0;
        ALfloat gain = Gains[c];
        if(!(fabsf(gain) > GAIN_SILENCE_THRESHOLD))
            continue;

        gain4 = vdupq_n_f32(gain);
        for(;BufferSize-pos > 3;pos += 4)
        {
            const float32x4_t val4 = vld1q_f32(&data[c][InPos+pos]);
            float32x4_t dry4 = vld1q_f32(&OutBuffer[pos]);
            dry4 = vmlaq_f32(dry4, val4, gain4);
            vst1q_f32(&OutBuffer[pos], dry4);
        }
        for(;pos < BufferSize;pos++)
            OutBuffer[pos] += data[c][InPos+pos]*gain;
    }
}