#include "config.h"

#include <arm_neon.h>

#include "AL/al.h"
#include "AL/alc.h"
#include "alMain.h"
#include "alu.h"
#include "hrtf.h"
#include "mixer_defs.h"

const ALfloat *Resample_lerp_Neon(const InterpState* UNUSED(state),
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei numsamples)
{
    const int32x4_t increment4 = vdupq_n_s32(increment*4);
    const float32x4_t fracOne4 = vdupq_n_f32(1.0f/FRACTIONONE);
    const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
    alignas(16) ALint pos_[4];
    alignas(16) ALsizei frac_[4];
    int32x4_t pos4;
    int32x4_t frac4;
    ALint pos;
    ALsizei i;
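
    /* InitiatePositionArrays() (declared alongside these mixers, presumably
     * in mixer_defs.h) seeds pos_/frac_ with the first four fixed-point read
     * offsets: entry k holds the position and fraction after advancing k
     * times by 'increment'.
     */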
    InitiatePositionArrays(frac, increment, frac_, pos_, 4);

    frac4 = vld1q_s32(frac_);
    pos4 = vld1q_s32(pos_);

    for(i = 0;numsamples-i > 3;i += 4)
    {
        const float32x4_t val1 = (float32x4_t){src[pos_[0]], src[pos_[1]],
                                               src[pos_[2]], src[pos_[3]]};
        const float32x4_t val2 = (float32x4_t){src[pos_[0]+1], src[pos_[1]+1],
                                               src[pos_[2]+1], src[pos_[3]+1]};
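        /* NOTE: NEON has no gather load, so the four samples for each vector
         * are collected with scalar indexing through pos_. */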

        /* val1 + (val2-val1)*mu */
        const float32x4_t r0 = vsubq_f32(val2, val1);
        const float32x4_t mu = vmulq_f32(vcvtq_f32_s32(frac4), fracOne4);
        const float32x4_t out = vmlaq_f32(val1, mu, r0);

        vst1q_f32(&dst[i], out);

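        /* The next three updates are the scalar fixed-point advance
         *   frac += increment; pos += frac>>FRACTIONBITS; frac &= FRACTIONMASK;
         * applied to four lanes at once; increment4 is increment*4 because
         * each lane moves four output samples ahead per iteration. */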
        frac4 = vaddq_s32(frac4, increment4);
        pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
        frac4 = vandq_s32(frac4, fracMask4);

        vst1q_s32(pos_, pos4);
    }

    if(i < numsamples)
    {
        /* NOTE: These four elements represent the position *after* the last
         * four samples, so the lowest element is the next position to
         * resample.
         */
        pos = pos_[0];
        frac = vgetq_lane_s32(frac4, 0);
        do {
            dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));

            frac += increment;
            pos  += frac>>FRACTIONBITS;
            frac &= FRACTIONMASK;
        } while(++i < numsamples);
    }
    return dst;
}

const ALfloat *Resample_fir4_Neon(const InterpState *state,
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei numsamples)
{
    const ALfloat (*restrict filter)[4] = ASSUME_ALIGNED(state->sinc4.filter, 16);
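    /* sinc4.filter is, by its use here, a table of aligned 4-tap kernels
     * indexed by the fixed-point sample fraction. */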
    const int32x4_t increment4 = vdupq_n_s32(increment*4);
    const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
    alignas(16) ALint pos_[4];
    alignas(16) ALsizei frac_[4];
    int32x4_t pos4;
    int32x4_t frac4;
    ALint pos;
    ALsizei i;

    InitiatePositionArrays(frac, increment, frac_, pos_, 4);

    frac4 = vld1q_s32(frac_);
    pos4 = vld1q_s32(pos_);

    for(i = 0;numsamples-i > 3;i += 4)
    {
        const float32x4_t val0 = vld1q_f32(&src[pos_[0]]);
        const float32x4_t val1 = vld1q_f32(&src[pos_[1]]);
        const float32x4_t val2 = vld1q_f32(&src[pos_[2]]);
        const float32x4_t val3 = vld1q_f32(&src[pos_[3]]);
        float32x4_t k0 = vld1q_f32(filter[frac_[0]]);
        float32x4_t k1 = vld1q_f32(filter[frac_[1]]);
        float32x4_t k2 = vld1q_f32(filter[frac_[2]]);
        float32x4_t k3 = vld1q_f32(filter[frac_[3]]);
        float32x4_t out;

        k0 = vmulq_f32(k0, val0);
        k1 = vmulq_f32(k1, val1);
        k2 = vmulq_f32(k2, val2);
        k3 = vmulq_f32(k3, val3);
        k0 = vcombine_f32(vpadd_f32(vget_low_f32(k0), vget_high_f32(k0)),
                          vpadd_f32(vget_low_f32(k1), vget_high_f32(k1)));
        k2 = vcombine_f32(vpadd_f32(vget_low_f32(k2), vget_high_f32(k2)),
                          vpadd_f32(vget_low_f32(k3), vget_high_f32(k3)));
        out = vcombine_f32(vpadd_f32(vget_low_f32(k0), vget_high_f32(k0)),
                           vpadd_f32(vget_low_f32(k2), vget_high_f32(k2)));
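        /* After the multiplies, each k vector holds one output sample's four
         * tap products; the two vpadd/vcombine stages sum the lanes pairwise,
         * leaving the four finished dot products in 'out'. */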

        vst1q_f32(&dst[i], out);

        frac4 = vaddq_s32(frac4, increment4);
        pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
        frac4 = vandq_s32(frac4, fracMask4);

        vst1q_s32(pos_, pos4);
        vst1q_s32(frac_, frac4);
    }

    if(i < numsamples)
    {
        /* NOTE: These four elements represent the position *after* the last
         * four samples, so the lowest element is the next position to
         * resample.
         */
        pos = pos_[0];
        frac = frac_[0];
        do {
            dst[i] = resample_fir4(src[pos], src[pos+1], src[pos+2], src[pos+3],
                                   filter[frac]);

            frac += increment;
            pos  += frac>>FRACTIONBITS;
            frac &= FRACTIONMASK;
        } while(++i < numsamples);
    }
    return dst;
}

const ALfloat *Resample_bsinc_Neon(const InterpState *state,
  const ALfloat *restrict src, ALsizei frac, ALint increment,
  ALfloat *restrict dst, ALsizei dstlen)
{
    const ALfloat *const filter = state->bsinc.filter;
    const float32x4_t sf4 = vdupq_n_f32(state->bsinc.sf);
    const ALsizei m = state->bsinc.m;
    const ALfloat *fil, *scd, *phd, *spd;
    ALsizei pi, i, j;
    float32x4_t r4;
    ALfloat pf;

    src += state->bsinc.l;
    for(i = 0;i < dstlen;i++)
    {
        // Calculate the phase index and factor.
#define FRAC_PHASE_BITDIFF (FRACTIONBITS-BSINC_PHASE_BITS)
        pi = frac >> FRAC_PHASE_BITDIFF;
        pf = (frac & ((1<<FRAC_PHASE_BITDIFF)-1)) * (1.0f/(1<<FRAC_PHASE_BITDIFF));
#undef FRAC_PHASE_BITDIFF
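        /* The top BSINC_PHASE_BITS of the fraction select a precomputed
         * filter phase (pi); the low bits become a 0..1 factor (pf) for
         * interpolating toward the next phase. */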

        fil = ASSUME_ALIGNED(filter + m*pi*4, 16);
        scd = ASSUME_ALIGNED(fil + m, 16);
        phd = ASSUME_ALIGNED(scd + m, 16);
        spd = ASSUME_ALIGNED(phd + m, 16);
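        /* Per the pointer math above and the formula below, each phase entry
         * evidently packs four length-m sub-filters back to back: base
         * coefficients (fil), scale deltas (scd), phase deltas (phd), and
         * scale-phase deltas (spd). */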

        // Apply the scale and phase interpolated filter.
        r4 = vdupq_n_f32(0.0f);
        {
            const float32x4_t pf4 = vdupq_n_f32(pf);
            for(j = 0;j < m;j += 4)
            {
                /* f = ((fil + sf*scd) + pf*(phd + sf*spd)) */
                const float32x4_t f4 = vmlaq_f32(
                    vmlaq_f32(vld1q_f32(&fil[j]), sf4, vld1q_f32(&scd[j])),
                    pf4, vmlaq_f32(vld1q_f32(&phd[j]), sf4, vld1q_f32(&spd[j]))
                );
                /* r += f*src */
                r4 = vmlaq_f32(r4, f4, vld1q_f32(&src[j]));
            }
        }
        r4 = vaddq_f32(r4, vcombine_f32(vrev64_f32(vget_high_f32(r4)),
                                        vrev64_f32(vget_low_f32(r4))));
        dst[i] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
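        /* The vrev64/vcombine step adds opposite lane pairs, and the final
         * vadd_f32/vget_lane_f32 collapses the remainder, so dst[i] receives
         * the sum of all four accumulator lanes. */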

        frac += increment;
        src  += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}

static inline void ApplyCoeffs(ALsizei Offset, ALfloat (*restrict Values)[2],
                               const ALsizei IrSize,
                               const ALfloat (*restrict Coeffs)[2],
                               ALfloat left, ALfloat right)
{
    ALsizei c;
    float32x4_t leftright4;
    {
        float32x2_t leftright2 = vdup_n_f32(0.0);
        leftright2 = vset_lane_f32(left, leftright2, 0);
        leftright2 = vset_lane_f32(right, leftright2, 1);
        leftright4 = vcombine_f32(leftright2, leftright2);
    }
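    /* leftright4 = {left, right, left, right}, so the single vmlaq_f32 in
     * the loop accumulates the stereo input pair into two successive
     * history entries at once. */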

    Values = ASSUME_ALIGNED(Values, 16);
    Coeffs = ASSUME_ALIGNED(Coeffs, 16);
    for(c = 0;c < IrSize;c += 2)
    {
        const ALsizei o0 = (Offset+c)&HRIR_MASK;
        const ALsizei o1 = (o0+1)&HRIR_MASK;
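        /* Values is evidently a power-of-two circular buffer, so both
         * offsets wrap via HRIR_MASK. */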
        float32x4_t vals = vcombine_f32(vld1_f32((float32_t*)&Values[o0][0]),
                                        vld1_f32((float32_t*)&Values[o1][0]));
        float32x4_t coefs = vld1q_f32((float32_t*)&Coeffs[c][0]);

        vals = vmlaq_f32(vals, coefs, leftright4);

        vst1_f32((float32_t*)&Values[o0][0], vget_low_f32(vals));
        vst1_f32((float32_t*)&Values[o1][0], vget_high_f32(vals));
    }
}

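/* mixer_inc.c presumably supplies the generic MixHrtf/MixHrtfBlend/
 * MixDirectHrtf bodies in terms of the local ApplyCoeffs(); the defines
 * below stamp them out under their NEON names. */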
#define MixHrtf MixHrtf_Neon
#define MixHrtfBlend MixHrtfBlend_Neon
#define MixDirectHrtf MixDirectHrtf_Neon
#include "mixer_inc.c"
#undef MixHrtf

void Mix_Neon(const ALfloat *data, ALsizei OutChans, ALfloat (*restrict OutBuffer)[BUFFERSIZE],
              ALfloat *CurrentGains, const ALfloat *TargetGains, ALsizei Counter, ALsizei OutPos,
              ALsizei BufferSize)
{
    ALfloat gain, delta, step;
    float32x4_t gain4;
    ALsizei c;

    data = ASSUME_ALIGNED(data, 16);
    OutBuffer = ASSUME_ALIGNED(OutBuffer, 16);

    delta = (Counter > 0) ? 1.0f/(ALfloat)Counter : 0.0f;
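    /* delta converts the remaining fade length into a per-sample step: each
     * channel's gain moves (TargetGains[c]-CurrentGains[c])*delta closer to
     * its target on every sample while Counter lasts. */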

    for(c = 0;c < OutChans;c++)
    {
        ALsizei pos = 0;
        gain = CurrentGains[c];
        step = (TargetGains[c] - gain) * delta;
        if(fabsf(step) > FLT_EPSILON)
        {
            ALsizei minsize = mini(BufferSize, Counter);
            /* Mix with applying gain steps in aligned multiples of 4. */
            if(minsize-pos > 3)
            {
                float32x4_t step4;
                gain4 = vsetq_lane_f32(gain, gain4, 0);
                gain4 = vsetq_lane_f32(gain + step, gain4, 1);
                gain4 = vsetq_lane_f32(gain + step + step, gain4, 2);
                gain4 = vsetq_lane_f32(gain + step + step + step, gain4, 3);
                step4 = vdupq_n_f32(step + step + step + step);
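                /* gain4 = {gain, gain+step, gain+2*step, gain+3*step} and
                 * step4 advances every lane by 4*step, so each iteration
                 * below applies the correctly ramped gain to four samples. */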
                do {
                    const float32x4_t val4 = vld1q_f32(&data[pos]);
                    float32x4_t dry4 = vld1q_f32(&OutBuffer[c][OutPos+pos]);
                    dry4 = vmlaq_f32(dry4, val4, gain4);
                    gain4 = vaddq_f32(gain4, step4);
                    vst1q_f32(&OutBuffer[c][OutPos+pos], dry4);
                    pos += 4;
                } while(minsize-pos > 3);
                /* NOTE: gain4 now represents the next four gains after the
                 * last four mixed samples, so the lowest element represents
                 * the next gain to apply.
                 */
                gain = vgetq_lane_f32(gain4, 0);
            }
            /* Mix with applying left over gain steps that aren't aligned
             * multiples of 4. */
            for(;pos < minsize;pos++)
            {
                OutBuffer[c][OutPos+pos] += data[pos]*gain;
                gain += step;
            }
            if(pos == Counter)
                gain = TargetGains[c];
            CurrentGains[c] = gain;

            /* Mix until pos is aligned with 4 or the mix is done. */
            minsize = mini(BufferSize, (pos+3)&~3);
            for(;pos < minsize;pos++)
                OutBuffer[c][OutPos+pos] += data[pos]*gain;
        }

        if(!(fabsf(gain) > GAIN_SILENCE_THRESHOLD))
            continue;
        gain4 = vdupq_n_f32(gain);
        for(;BufferSize-pos > 3;pos += 4)
        {
            const float32x4_t val4 = vld1q_f32(&data[pos]);
            float32x4_t dry4 = vld1q_f32(&OutBuffer[c][OutPos+pos]);
            dry4 = vmlaq_f32(dry4, val4, gain4);
            vst1q_f32(&OutBuffer[c][OutPos+pos], dry4);
        }
        for(;pos < BufferSize;pos++)
            OutBuffer[c][OutPos+pos] += data[pos]*gain;
    }
}

void MixRow_Neon(ALfloat *OutBuffer, const ALfloat *Gains,
                 const ALfloat (*restrict data)[BUFFERSIZE], ALsizei InChans,
                 ALsizei InPos, ALsizei BufferSize)
{
    float32x4_t gain4;
    ALsizei c;

    data = ASSUME_ALIGNED(data, 16);
    OutBuffer = ASSUME_ALIGNED(OutBuffer, 16);

    for(c = 0;c < InChans;c++)
    {
        ALsizei pos = 0;
        ALfloat gain = Gains[c];
        if(!(fabsf(gain) > GAIN_SILENCE_THRESHOLD))
            continue;

        gain4 = vdupq_n_f32(gain);
        for(;BufferSize-pos > 3;pos += 4)
        {
            const float32x4_t val4 = vld1q_f32(&data[c][InPos+pos]);
            float32x4_t dry4 = vld1q_f32(&OutBuffer[pos]);
            dry4 = vmlaq_f32(dry4, val4, gain4);
            vst1q_f32(&OutBuffer[pos], dry4);
        }
        for(;pos < BufferSize;pos++)
            OutBuffer[pos] += data[c][InPos+pos]*gain;
    }
}