/* Alc/mixer_sse41.c */

/**
 * OpenAL cross platform audio library
 * Copyright (C) 2014 by Timothy Arceri <t_arceri@yahoo.com.au>.
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 * Or go to http://www.gnu.org/copyleft/lgpl.html
 */

#include "config.h"

#include <xmmintrin.h>
#include <emmintrin.h>
#include <smmintrin.h>

#include "alu.h"
#include "mixer_defs.h"
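
/* SSE4.1 variants of the resamplers. The SSE4.1 requirement comes from
 * _mm_extract_epi32 (smmintrin.h), used below to read the updated positions
 * and fractions back out of the vector registers.
 */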

const ALfloat *Resample_lerp32_SSE41(const BsincState* UNUSED(state), const ALfloat *restrict src,
                                     ALuint frac, ALuint increment, ALfloat *restrict dst,
                                     ALuint numsamples)
{
    const __m128i increment4 = _mm_set1_epi32(increment*4);
    const __m128 fracOne4 = _mm_set1_ps(1.0f/FRACTIONONE);
    const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
    union { alignas(16) ALuint i[4]; float f[4]; } pos_;
    union { alignas(16) ALuint i[4]; float f[4]; } frac_;
    __m128i frac4, pos4;
    ALuint pos;
    ALuint i;

    InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);

    frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
    pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));
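
    /* Process four outputs per iteration: gather each lane's neighboring
     * sample pair, interpolate with the lane's fraction, store, then advance
     * the fixed-point positions (the fraction's integer overflow carries
     * into pos4).
     */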
    for(i = 0;numsamples-i > 3;i += 4)
    {
        const __m128 val1 = _mm_setr_ps(src[pos_.i[0]], src[pos_.i[1]], src[pos_.i[2]], src[pos_.i[3]]);
        const __m128 val2 = _mm_setr_ps(src[pos_.i[0]+1], src[pos_.i[1]+1], src[pos_.i[2]+1], src[pos_.i[3]+1]);

        /* val1 + (val2-val1)*mu */
        const __m128 r0 = _mm_sub_ps(val2, val1);
        const __m128 mu = _mm_mul_ps(_mm_cvtepi32_ps(frac4), fracOne4);
        const __m128 out = _mm_add_ps(val1, _mm_mul_ps(mu, r0));

        _mm_store_ps(&dst[i], out);

        frac4 = _mm_add_epi32(frac4, increment4);
        pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
        frac4 = _mm_and_si128(frac4, fracMask4);

        pos_.i[0] = _mm_extract_epi32(pos4, 0);
        pos_.i[1] = _mm_extract_epi32(pos4, 1);
        pos_.i[2] = _mm_extract_epi32(pos4, 2);
        pos_.i[3] = _mm_extract_epi32(pos4, 3);
    }

    /* NOTE: These four elements represent the position *after* the last four
     * samples, so the lowest element is the next position to resample.
     */
    pos = pos_.i[0];
    frac = _mm_cvtsi128_si32(frac4);

    for(;i < numsamples;i++)
    {
        dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));

        frac += increment;
        pos += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}

const ALfloat *Resample_fir4_32_SSE41(const BsincState* UNUSED(state), const ALfloat *restrict src,
                                      ALuint frac, ALuint increment, ALfloat *restrict dst,
                                      ALuint numsamples)
{
    const __m128i increment4 = _mm_set1_epi32(increment*4);
    const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
    union { alignas(16) ALuint i[4]; float f[4]; } pos_;
    union { alignas(16) ALuint i[4]; float f[4]; } frac_;
    __m128i frac4, pos4;
    ALuint pos;
    ALuint i;

    InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);

    frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
    pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));
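
    /* Each output sample is a 4-tap FIR: the coefficient table row selected
     * by the lane's fraction is multiplied with four consecutive input
     * samples, and the products are summed with horizontal adds. src is
     * stepped back one sample (--src below) so each lane's taps cover
     * src[pos-1]..src[pos+2] of the caller's buffer.
     */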
    --src;
    for(i = 0;numsamples-i > 3;i += 4)
    {
        const __m128 val0 = _mm_loadu_ps(&src[pos_.i[0]]);
        const __m128 val1 = _mm_loadu_ps(&src[pos_.i[1]]);
        const __m128 val2 = _mm_loadu_ps(&src[pos_.i[2]]);
        const __m128 val3 = _mm_loadu_ps(&src[pos_.i[3]]);
        __m128 k0 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[0]]);
        __m128 k1 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[1]]);
        __m128 k2 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[2]]);
        __m128 k3 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[3]]);
        __m128 out;

        k0 = _mm_mul_ps(k0, val0);
        k1 = _mm_mul_ps(k1, val1);
        k2 = _mm_mul_ps(k2, val2);
        k3 = _mm_mul_ps(k3, val3);
        /* Two rounds of horizontal adds reduce each lane's four products to
         * a single sum, leaving the four results in output order.
         */
        k0 = _mm_hadd_ps(k0, k1);
        k2 = _mm_hadd_ps(k2, k3);
        out = _mm_hadd_ps(k0, k2);

        _mm_store_ps(&dst[i], out);

        frac4 = _mm_add_epi32(frac4, increment4);
        pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
        frac4 = _mm_and_si128(frac4, fracMask4);

        pos_.i[0] = _mm_extract_epi32(pos4, 0);
        pos_.i[1] = _mm_extract_epi32(pos4, 1);
        pos_.i[2] = _mm_extract_epi32(pos4, 2);
        pos_.i[3] = _mm_extract_epi32(pos4, 3);
        frac_.i[0] = _mm_extract_epi32(frac4, 0);
        frac_.i[1] = _mm_extract_epi32(frac4, 1);
        frac_.i[2] = _mm_extract_epi32(frac4, 2);
        frac_.i[3] = _mm_extract_epi32(frac4, 3);
    }

    pos = pos_.i[0];
    frac = frac_.i[0];

    for(;i < numsamples;i++)
    {
        dst[i] = resample_fir4(src[pos], src[pos+1], src[pos+2], src[pos+3], frac);

        frac += increment;
        pos += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}

const ALfloat *Resample_fir8_32_SSE41(const BsincState* UNUSED(state), const ALfloat *restrict src,
                                      ALuint frac, ALuint increment, ALfloat *restrict dst,
                                      ALuint numsamples)
{
    const __m128i increment4 = _mm_set1_epi32(increment*4);
    const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
    union { alignas(16) ALuint i[4]; float f[4]; } pos_;
    union { alignas(16) ALuint i[4]; float f[4]; } frac_;
    __m128i frac4, pos4;
    ALuint pos;
    ALuint i, j;

    InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);

    frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
    pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));
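
    /* Same scheme as the 4-tap resampler, but with an 8-tap filter: the
     * inner loop handles the taps in two groups of four, and the two partial
     * sums are added before storing. src is stepped back three samples so
     * each lane's taps cover src[pos-3]..src[pos+4] of the caller's buffer.
     */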
    src -= 3;
    for(i = 0;numsamples-i > 3;i += 4)
    {
        __m128 out[2];
        for(j = 0;j < 8;j+=4)
        {
            const __m128 val0 = _mm_loadu_ps(&src[pos_.i[0]+j]);
            const __m128 val1 = _mm_loadu_ps(&src[pos_.i[1]+j]);
            const __m128 val2 = _mm_loadu_ps(&src[pos_.i[2]+j]);
            const __m128 val3 = _mm_loadu_ps(&src[pos_.i[3]+j]);
            __m128 k0 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[0]][j]);
            __m128 k1 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[1]][j]);
            __m128 k2 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[2]][j]);
            __m128 k3 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[3]][j]);

            k0 = _mm_mul_ps(k0, val0);
            k1 = _mm_mul_ps(k1, val1);
            k2 = _mm_mul_ps(k2, val2);
            k3 = _mm_mul_ps(k3, val3);
            k0 = _mm_hadd_ps(k0, k1);
            k2 = _mm_hadd_ps(k2, k3);
            out[j>>2] = _mm_hadd_ps(k0, k2);
        }

        out[0] = _mm_add_ps(out[0], out[1]);
        _mm_store_ps(&dst[i], out[0]);

        frac4 = _mm_add_epi32(frac4, increment4);
        pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
        frac4 = _mm_and_si128(frac4, fracMask4);

        pos_.i[0] = _mm_extract_epi32(pos4, 0);
        pos_.i[1] = _mm_extract_epi32(pos4, 1);
        pos_.i[2] = _mm_extract_epi32(pos4, 2);
        pos_.i[3] = _mm_extract_epi32(pos4, 3);
        frac_.i[0] = _mm_extract_epi32(frac4, 0);
        frac_.i[1] = _mm_extract_epi32(frac4, 1);
        frac_.i[2] = _mm_extract_epi32(frac4, 2);
        frac_.i[3] = _mm_extract_epi32(frac4, 3);
    }

    pos = pos_.i[0];
    frac = frac_.i[0];

    for(;i < numsamples;i++)
    {
        dst[i] = resample_fir8(src[pos  ], src[pos+1], src[pos+2], src[pos+3],
                               src[pos+4], src[pos+5], src[pos+6], src[pos+7], frac);

        frac += increment;
        pos += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}