1 /* MikMod sound library
2 (c) 1998, 1999, 2000, 2001, 2002 Miodrag Vallat and others - see file
3 AUTHORS for complete list.
5 This library is free software; you can redistribute it and/or modify
6 it under the terms of the GNU Library General Public License as
7 published by the Free Software Foundation; either version 2 of
8 the License, or (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 /*==============================================================================
23 $Id: virtch.c,v 1.4 2005/05/18 13:42:23 raphassenat Exp $
25 Sample mixing routines, using a 32 bits mixing buffer.
27 ==============================================================================*/
31 Optional features include:
32 (a) 4-step reverb (for 16 bit output only)
33 (b) Interpolation of sample data during mixing
34 (c) Dolby Surround Sound
50 #include "mikmod_internals.h"
58 Controls the maximum volume of the sound output. All data is shifted
59 right by BITSHIFT after being mixed. Higher values result in quieter
60 sound and less chance of distortion.
63 Controls the duration of the reverb. Larger values represent a shorter
64 reverb loop. Smaller values extend the reverb but can result in more of
70 #define REVERBERATION 110000L
73 #define FRACMASK ((1L<<FRACBITS)-1L)
75 #define TICKLSIZE 8192
76 #define TICKWSIZE (TICKLSIZE<<1)
77 #define TICKBSIZE (TICKWSIZE<<1)
80 #define CLICK_BUFFER (1L<<CLICK_SHIFT)
83 #define MIN(a,b) (((a)<(b)) ? (a) : (b))
86 typedef struct VINFO
{
87 UBYTE kick
; /* =1 -> sample has to be restarted */
88 UBYTE active
; /* =1 -> sample is playing */
89 UWORD flags
; /* 16/8 bits looping/one-shot */
90 SWORD handle
; /* identifies the sample */
91 ULONG start
; /* start index */
92 ULONG size
; /* samplesize */
93 ULONG reppos
; /* loop start */
94 ULONG repend
; /* loop end */
95 ULONG frq
; /* current frequency */
96 int vol
; /* current volume */
97 int pan
; /* current panning position */
100 int lvolsel
,rvolsel
; /* Volume factor in range 0-255 */
103 SLONGLONG current
; /* current index in the sample */
104 SLONGLONG increment
; /* increment value */
107 static SWORD
**Samples
;
108 static VINFO
*vinf
=NULL
,*vnf
;
109 static long tickleft
,samplesthatfit
,vc_memory
=0;
110 static int vc_softchn
;
111 static SLONGLONG idxsize
,idxlpos
,idxlend
;
112 static SLONG
*vc_tickbuf
=NULL
;
113 static UWORD vc_mode
;
115 /* Reverb control variables */
117 static int RVc1
, RVc2
, RVc3
, RVc4
, RVc5
, RVc6
, RVc7
, RVc8
;
118 static ULONG RVRindex
;
120 /* For Mono or Left Channel */
121 static SLONG
*RVbufL1
=NULL
,*RVbufL2
=NULL
,*RVbufL3
=NULL
,*RVbufL4
=NULL
,
122 *RVbufL5
=NULL
,*RVbufL6
=NULL
,*RVbufL7
=NULL
,*RVbufL8
=NULL
;
124 /* For Stereo only (Right Channel) */
125 static SLONG
*RVbufR1
=NULL
,*RVbufR2
=NULL
,*RVbufR3
=NULL
,*RVbufR4
=NULL
,
126 *RVbufR5
=NULL
,*RVbufR6
=NULL
,*RVbufR7
=NULL
,*RVbufR8
=NULL
;
128 #ifdef NATIVE_64BIT_INT
129 #define NATIVE SLONGLONG
133 #if defined HAVE_SSE2 || defined HAVE_ALTIVEC
135 static size_t MixSIMDMonoNormal(const SWORD
* srce
,SLONG
* dest
,size_t index
, size_t increment
,size_t todo
)
139 SLONG lvolsel
= vnf
->lvolsel
;
142 sample
= srce
[index
>> FRACBITS
];
145 *dest
++ += lvolsel
* sample
;
150 static size_t MixSIMDStereoNormal(const SWORD
* srce
, SLONG
* dest
, size_t index
, size_t increment
,size_t todo
)
152 SWORD vol
[8] = {vnf
->lvolsel
, vnf
->rvolsel
};
156 // Dest can be misaligned ...
157 while(!IS_ALIGNED_16(dest
)) {
158 sample
=srce
[(index
+= increment
) >> FRACBITS
];
159 *dest
++ += vol
[0] * sample
;
160 *dest
++ += vol
[1] * sample
;
164 // Srce is always aligned ...
166 #if defined HAVE_SSE2
169 __m128i v0
= _mm_set_epi16(0, vol
[1],
173 for(todo
>>=2;todo
; todo
--)
175 SWORD s0
= srce
[(index
+= increment
) >> FRACBITS
];
176 SWORD s1
= srce
[(index
+= increment
) >> FRACBITS
];
177 SWORD s2
= srce
[(index
+= increment
) >> FRACBITS
];
178 SWORD s3
= srce
[(index
+= increment
) >> FRACBITS
];
179 __m128i v1
= _mm_set_epi16(0, s1
, 0, s1
, 0, s0
, 0, s0
);
180 __m128i v2
= _mm_set_epi16(0, s3
, 0, s3
, 0, s2
, 0, s2
);
181 __m128i v3
= _mm_load_si128((__m128i
*)(dest
+0));
182 __m128i v4
= _mm_load_si128((__m128i
*)(dest
+4));
183 _mm_store_si128((__m128i
*)(dest
+0), _mm_add_epi32(v3
, _mm_madd_epi16(v0
, v1
)));
184 _mm_store_si128((__m128i
*)(dest
+4), _mm_add_epi32(v4
, _mm_madd_epi16(v0
, v2
)));
189 #elif defined HAVE_ALTIVEC
192 vector
signed short r0
= vec_ld(0, vol
);
193 vector
signed short v0
= vec_perm(r0
, r0
, (vector
unsigned char)(0, 1, // l
204 for(todo
>>=2;todo
; todo
--)
207 s
[0] = srce
[(index
+= increment
) >> FRACBITS
];
208 s
[1] = srce
[(index
+= increment
) >> FRACBITS
];
209 s
[2] = srce
[(index
+= increment
) >> FRACBITS
];
210 s
[3] = srce
[(index
+= increment
) >> FRACBITS
];
213 vector
short int r1
= vec_ld(0, s
);
214 vector
signed short v1
= vec_perm(r1
, r1
, (vector
unsigned char)(0*2, 0*2+1, // s0
224 vector
signed short v2
= vec_perm(r1
, r1
, (vector
unsigned char)(2*2, 2*2+1, // s2
233 vector
signed int v3
= vec_ld(0, dest
);
234 vector
signed int v4
= vec_ld(0, dest
+ 4);
235 vector
signed int v5
= vec_mule(v0
, v1
);
236 vector
signed int v6
= vec_mule(v0
, v2
);
238 vec_st(vec_add(v3
, v5
), 0, dest
);
239 vec_st(vec_add(v4
, v6
), 0x10, dest
);
244 #endif // HAVE_ALTIVEC
246 // Remaining bits ...
248 sample
=srce
[(index
+= increment
) >> FRACBITS
];
250 *dest
++ += vol
[0] * sample
;
251 *dest
++ += vol
[1] * sample
;
257 /*========== 32 bit sample mixers - only for 32 bit platforms */
258 #ifndef NATIVE_64BIT_INT
260 static SLONG
Mix32MonoNormal(const SWORD
* srce
,SLONG
* dest
,SLONG index
,SLONG increment
,SLONG todo
)
262 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
263 if (md_mode
& DMODE_SIMDMIXER
)
265 return MixSIMDMonoNormal(srce
, dest
, index
, increment
, todo
);
271 SLONG lvolsel
= vnf
->lvolsel
;
274 sample
= srce
[index
>> FRACBITS
];
277 *dest
++ += lvolsel
* sample
;
// FIXME: this mixer should also work on 64-bit platforms
// Hint: change the SLONG / SLONGLONG mess to size_t
285 static SLONG
Mix32StereoNormal(const SWORD
* srce
,SLONG
* dest
,SLONG index
,SLONG increment
,SLONG todo
)
287 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
288 if (md_mode
& DMODE_SIMDMIXER
)
290 return MixSIMDStereoNormal(srce
, dest
, index
, increment
, todo
);
296 SLONG lvolsel
= vnf
->lvolsel
;
297 SLONG rvolsel
= vnf
->rvolsel
;
300 sample
=srce
[(index
+= increment
) >> FRACBITS
];
302 *dest
++ += lvolsel
* sample
;
303 *dest
++ += rvolsel
* sample
;
310 static SLONG
Mix32SurroundNormal(const SWORD
* srce
,SLONG
* dest
,SLONG index
,SLONG increment
,SLONG todo
)
313 SLONG lvolsel
= vnf
->lvolsel
;
314 SLONG rvolsel
= vnf
->rvolsel
;
316 if (lvolsel
>=rvolsel
) {
318 sample
= srce
[index
>> FRACBITS
];
321 *dest
++ += lvolsel
*sample
;
322 *dest
++ -= lvolsel
*sample
;
326 sample
= srce
[index
>> FRACBITS
];
329 *dest
++ -= rvolsel
*sample
;
330 *dest
++ += rvolsel
*sample
;
336 static SLONG
Mix32MonoInterp(const SWORD
* srce
,SLONG
* dest
,SLONG index
,SLONG increment
,SLONG todo
)
339 SLONG lvolsel
= vnf
->lvolsel
;
340 SLONG rampvol
= vnf
->rampvol
;
343 SLONG oldlvol
= vnf
->oldlvol
- lvolsel
;
345 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
346 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
347 *(index
&FRACMASK
)>>FRACBITS
);
350 *dest
++ += ((lvolsel
<< CLICK_SHIFT
) + oldlvol
* rampvol
)
351 * sample
>> CLICK_SHIFT
;
355 vnf
->rampvol
= rampvol
;
361 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
362 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
363 *(index
&FRACMASK
)>>FRACBITS
);
366 *dest
++ += lvolsel
* sample
;
371 static SLONG
Mix32StereoInterp(const SWORD
* srce
,SLONG
* dest
,SLONG index
,SLONG increment
,SLONG todo
)
374 SLONG lvolsel
= vnf
->lvolsel
;
375 SLONG rvolsel
= vnf
->rvolsel
;
376 SLONG rampvol
= vnf
->rampvol
;
379 SLONG oldlvol
= vnf
->oldlvol
- lvolsel
;
380 SLONG oldrvol
= vnf
->oldrvol
- rvolsel
;
382 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
383 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
384 *(index
&FRACMASK
)>>FRACBITS
);
387 *dest
++ += ((lvolsel
<< CLICK_SHIFT
) + oldlvol
* rampvol
)
388 * sample
>> CLICK_SHIFT
;
389 *dest
++ += ((rvolsel
<< CLICK_SHIFT
) + oldrvol
* rampvol
)
390 * sample
>> CLICK_SHIFT
;
394 vnf
->rampvol
= rampvol
;
400 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
401 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
402 *(index
&FRACMASK
)>>FRACBITS
);
405 *dest
++ += lvolsel
* sample
;
406 *dest
++ += rvolsel
* sample
;
411 static SLONG
Mix32SurroundInterp(const SWORD
* srce
,SLONG
* dest
,SLONG index
,SLONG increment
,SLONG todo
)
414 SLONG lvolsel
= vnf
->lvolsel
;
415 SLONG rvolsel
= vnf
->rvolsel
;
416 SLONG rampvol
= vnf
->rampvol
;
419 if (lvolsel
>= rvolsel
) {
421 oldvol
= vnf
->oldlvol
;
424 oldvol
= vnf
->oldrvol
;
430 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
431 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
432 *(index
&FRACMASK
)>>FRACBITS
);
435 sample
=((vol
<< CLICK_SHIFT
) + oldvol
* rampvol
)
436 * sample
>> CLICK_SHIFT
;
443 vnf
->rampvol
= rampvol
;
449 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
450 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
451 *(index
&FRACMASK
)>>FRACBITS
);
454 *dest
++ += vol
*sample
;
455 *dest
++ -= vol
*sample
;
461 /*========== 64 bit sample mixers - all platforms */
463 static SLONGLONG
MixMonoNormal(const SWORD
* srce
,SLONG
* dest
,SLONGLONG index
,SLONGLONG increment
,SLONG todo
)
466 SLONG lvolsel
= vnf
->lvolsel
;
469 sample
= srce
[index
>> FRACBITS
];
472 *dest
++ += lvolsel
* sample
;
477 static SLONGLONG
MixStereoNormal(const SWORD
* srce
,SLONG
* dest
,SLONGLONG index
,SLONGLONG increment
,SLONG todo
)
480 SLONG lvolsel
= vnf
->lvolsel
;
481 SLONG rvolsel
= vnf
->rvolsel
;
484 sample
=srce
[index
>> FRACBITS
];
487 *dest
++ += lvolsel
* sample
;
488 *dest
++ += rvolsel
* sample
;
493 static SLONGLONG
MixSurroundNormal(const SWORD
* srce
,SLONG
* dest
,SLONGLONG index
,SLONGLONG increment
,SLONG todo
)
496 SLONG lvolsel
= vnf
->lvolsel
;
497 SLONG rvolsel
= vnf
->rvolsel
;
499 if(vnf
->lvolsel
>=vnf
->rvolsel
) {
501 sample
= srce
[index
>> FRACBITS
];
504 *dest
++ += lvolsel
*sample
;
505 *dest
++ -= lvolsel
*sample
;
509 sample
= srce
[index
>> FRACBITS
];
512 *dest
++ -= rvolsel
*sample
;
513 *dest
++ += rvolsel
*sample
;
519 static SLONGLONG
MixMonoInterp(const SWORD
* srce
,SLONG
* dest
,SLONGLONG index
,SLONGLONG increment
,SLONG todo
)
522 SLONG lvolsel
= vnf
->lvolsel
;
523 SLONG rampvol
= vnf
->rampvol
;
526 SLONG oldlvol
= vnf
->oldlvol
- lvolsel
;
528 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
529 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
530 *(index
&FRACMASK
)>>FRACBITS
);
533 *dest
++ += ((lvolsel
<< CLICK_SHIFT
) + oldlvol
* rampvol
)
534 * sample
>> CLICK_SHIFT
;
538 vnf
->rampvol
= rampvol
;
544 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
545 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
546 *(index
&FRACMASK
)>>FRACBITS
);
549 *dest
++ += lvolsel
* sample
;
554 static SLONGLONG
MixStereoInterp(const SWORD
* srce
,SLONG
* dest
,SLONGLONG index
,SLONGLONG increment
,SLONG todo
)
557 SLONG lvolsel
= vnf
->lvolsel
;
558 SLONG rvolsel
= vnf
->rvolsel
;
559 SLONG rampvol
= vnf
->rampvol
;
562 SLONG oldlvol
= vnf
->oldlvol
- lvolsel
;
563 SLONG oldrvol
= vnf
->oldrvol
- rvolsel
;
565 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
566 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
567 *(index
&FRACMASK
)>>FRACBITS
);
570 *dest
++ +=((lvolsel
<< CLICK_SHIFT
) + oldlvol
* rampvol
)
571 * sample
>> CLICK_SHIFT
;
572 *dest
++ +=((rvolsel
<< CLICK_SHIFT
) + oldrvol
* rampvol
)
573 * sample
>> CLICK_SHIFT
;
577 vnf
->rampvol
= rampvol
;
583 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
584 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
585 *(index
&FRACMASK
)>>FRACBITS
);
588 *dest
++ += lvolsel
* sample
;
589 *dest
++ += rvolsel
* sample
;
594 static SLONGLONG
MixSurroundInterp(const SWORD
* srce
,SLONG
* dest
,SLONGLONG index
,SLONGLONG increment
,SLONG todo
)
597 SLONG lvolsel
= vnf
->lvolsel
;
598 SLONG rvolsel
= vnf
->rvolsel
;
599 SLONG rampvol
= vnf
->rampvol
;
602 if (lvolsel
>= rvolsel
) {
604 oldvol
= vnf
->oldlvol
;
607 oldvol
= vnf
->oldrvol
;
613 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
614 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
615 *(index
&FRACMASK
)>>FRACBITS
);
618 sample
=((vol
<< CLICK_SHIFT
) + oldvol
* rampvol
)
619 * sample
>> CLICK_SHIFT
;
625 vnf
->rampvol
= rampvol
;
631 sample
=(SLONG
)srce
[index
>>FRACBITS
]+
632 ((SLONG
)(srce
[(index
>>FRACBITS
)+1]-srce
[index
>>FRACBITS
])
633 *(index
&FRACMASK
)>>FRACBITS
);
636 *dest
++ += vol
*sample
;
637 *dest
++ -= vol
*sample
;
642 static void (*MixReverb
)(SLONG
* srce
,NATIVE count
);
645 #define COMPUTE_LOC(n) loc##n = RVRindex % RVc##n
646 #define COMPUTE_LECHO(n) RVbufL##n [loc##n ]=speedup+((ReverbPct*RVbufL##n [loc##n ])>>7)
647 #define COMPUTE_RECHO(n) RVbufR##n [loc##n ]=speedup+((ReverbPct*RVbufR##n [loc##n ])>>7)
649 static void MixReverb_Normal(SLONG
* srce
,NATIVE count
)
651 unsigned int speedup
;
653 unsigned int loc1
,loc2
,loc3
,loc4
;
654 unsigned int loc5
,loc6
,loc7
,loc8
;
656 ReverbPct
=58+(md_reverb
<<2);
658 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
659 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
662 /* Compute the left channel echo buffers */
663 speedup
= *srce
>> 3;
665 COMPUTE_LECHO(1); COMPUTE_LECHO(2); COMPUTE_LECHO(3); COMPUTE_LECHO(4);
666 COMPUTE_LECHO(5); COMPUTE_LECHO(6); COMPUTE_LECHO(7); COMPUTE_LECHO(8);
668 /* Prepare to compute actual finalized data */
671 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
672 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
675 *srce
++ +=RVbufL1
[loc1
]-RVbufL2
[loc2
]+RVbufL3
[loc3
]-RVbufL4
[loc4
]+
676 RVbufL5
[loc5
]-RVbufL6
[loc6
]+RVbufL7
[loc7
]-RVbufL8
[loc8
];
680 static void MixReverb_Stereo(SLONG
* srce
,NATIVE count
)
682 unsigned int speedup
;
684 unsigned int loc1
, loc2
, loc3
, loc4
;
685 unsigned int loc5
, loc6
, loc7
, loc8
;
687 ReverbPct
= 92+(md_reverb
<<1);
689 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
690 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
693 /* Compute the left channel echo buffers */
694 speedup
= *srce
>> 3;
696 COMPUTE_LECHO(1); COMPUTE_LECHO(2); COMPUTE_LECHO(3); COMPUTE_LECHO(4);
697 COMPUTE_LECHO(5); COMPUTE_LECHO(6); COMPUTE_LECHO(7); COMPUTE_LECHO(8);
699 /* Compute the right channel echo buffers */
700 speedup
= srce
[1] >> 3;
702 COMPUTE_RECHO(1); COMPUTE_RECHO(2); COMPUTE_RECHO(3); COMPUTE_RECHO(4);
703 COMPUTE_RECHO(5); COMPUTE_RECHO(6); COMPUTE_RECHO(7); COMPUTE_RECHO(8);
705 /* Prepare to compute actual finalized data */
708 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
709 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
711 /* left channel then right channel */
712 *srce
++ +=RVbufL1
[loc1
]-RVbufL2
[loc2
]+RVbufL3
[loc3
]-RVbufL4
[loc4
]+
713 RVbufL5
[loc5
]-RVbufL6
[loc6
]+RVbufL7
[loc7
]-RVbufL8
[loc8
];
715 *srce
++ +=RVbufR1
[loc1
]-RVbufR2
[loc2
]+RVbufR3
[loc3
]-RVbufR4
[loc4
]+
716 RVbufR5
[loc5
]-RVbufR6
[loc6
]+RVbufR7
[loc7
]-RVbufR8
[loc8
];
720 static void (*MixLowPass
)(SLONG
* srce
,NATIVE count
);
722 static int nLeftNR
, nRightNR
;
724 static void MixLowPass_Stereo(SLONG
* srce
,NATIVE count
)
726 int n1
= nLeftNR
, n2
= nRightNR
;
731 int vnr
= pnr
[0] >> 1;
743 static void MixLowPass_Normal(SLONG
* srce
,NATIVE count
)
750 int vnr
= pnr
[0] >> 1;
758 /* shifting fudge factor for FP scaling, should be 0 < FP_SHIFT < BITSHIFT */
762 #define EXTRACT_SAMPLE_FP(var,size) var=(*srce++>>(BITSHIFT-size)) * ((1.0f / 32768.0f) / (1 << size))
763 #define CHECK_SAMPLE_FP(var,bound) var=(var>bound)?bound:(var<-bound)?-bound:var
764 #define PUT_SAMPLE_FP(var) *dste++=var
766 static void Mix32ToFP(float* dste
,const SLONG
*srce
,NATIVE count
)
772 for(count
>>=2;count
;count
--) {
773 EXTRACT_SAMPLE_FP(x1
,FP_SHIFT
); EXTRACT_SAMPLE_FP(x2
,FP_SHIFT
);
774 EXTRACT_SAMPLE_FP(x3
,FP_SHIFT
); EXTRACT_SAMPLE_FP(x4
,FP_SHIFT
);
776 CHECK_SAMPLE_FP(x1
,1.0f
); CHECK_SAMPLE_FP(x2
,1.0f
);
777 CHECK_SAMPLE_FP(x3
,1.0f
); CHECK_SAMPLE_FP(x4
,1.0f
);
779 PUT_SAMPLE_FP(x1
); PUT_SAMPLE_FP(x2
);
780 PUT_SAMPLE_FP(x3
); PUT_SAMPLE_FP(x4
);
783 EXTRACT_SAMPLE_FP(x1
,FP_SHIFT
);
784 CHECK_SAMPLE_FP(x1
,1.0f
);
791 #define EXTRACT_SAMPLE(var,size) var=*srce++>>(BITSHIFT+16-size)
792 #define CHECK_SAMPLE(var,bound) var=(var>=bound)?bound-1:(var<-bound)?-bound:var
793 #define PUT_SAMPLE(var) *dste++=var
795 static void Mix32To16(SWORD
* dste
,const SLONG
*srce
,NATIVE count
)
801 for(count
>>=2;count
;count
--) {
802 EXTRACT_SAMPLE(x1
,16); EXTRACT_SAMPLE(x2
,16);
803 EXTRACT_SAMPLE(x3
,16); EXTRACT_SAMPLE(x4
,16);
805 CHECK_SAMPLE(x1
,32768); CHECK_SAMPLE(x2
,32768);
806 CHECK_SAMPLE(x3
,32768); CHECK_SAMPLE(x4
,32768);
808 PUT_SAMPLE(x1
); PUT_SAMPLE(x2
); PUT_SAMPLE(x3
); PUT_SAMPLE(x4
);
811 EXTRACT_SAMPLE(x1
,16);
812 CHECK_SAMPLE(x1
,32768);
817 static void Mix32To8(SBYTE
* dste
,const SLONG
*srce
,NATIVE count
)
823 for(count
>>=2;count
;count
--) {
824 EXTRACT_SAMPLE(x1
,8); EXTRACT_SAMPLE(x2
,8);
825 EXTRACT_SAMPLE(x3
,8); EXTRACT_SAMPLE(x4
,8);
827 CHECK_SAMPLE(x1
,128); CHECK_SAMPLE(x2
,128);
828 CHECK_SAMPLE(x3
,128); CHECK_SAMPLE(x4
,128);
830 PUT_SAMPLE(x1
+128); PUT_SAMPLE(x2
+128);
831 PUT_SAMPLE(x3
+128); PUT_SAMPLE(x4
+128);
834 EXTRACT_SAMPLE(x1
,8);
835 CHECK_SAMPLE(x1
,128);
840 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
842 // Mix 32bit input to floating point. 32 samples per iteration
844 static void Mix32ToFP_SIMD(float* dste
,SLONG
* srce
,NATIVE count
)
848 while(!IS_ALIGNED_16(dste
) || !IS_ALIGNED_16(srce
))
851 EXTRACT_SAMPLE_FP(x1
,FP_SHIFT
);
852 CHECK_SAMPLE_FP(x1
,1.0f
);
863 const float k
= ((1.0f
/ 32768.0f
) / (1 << FP_SHIFT
));
865 simd_m128 xk
= LOAD_PS1_SIMD(&k
); // Scale factor
867 for(count
>>=3;count
;count
--) {
868 EXTRACT_SAMPLE_SIMD_F(srce
, x1
, FP_SHIFT
, xk
); // Load 4 samples
869 EXTRACT_SAMPLE_SIMD_F(srce
+4, x2
, FP_SHIFT
, xk
); // Load 4 samples
870 PUT_SAMPLE_SIMD_F(dste
, x1
); // Store 4 samples
871 PUT_SAMPLE_SIMD_F(dste
+4, x2
); // Store 4 samples
877 EXTRACT_SAMPLE_SIMD_F(srce
, x1
, FP_SHIFT
, xk
); // Load 4 samples
878 PUT_SAMPLE_SIMD_F(dste
, x1
); // Store 4 samples
886 EXTRACT_SAMPLE_FP(x1
,FP_SHIFT
);
887 CHECK_SAMPLE_FP(x1
,1.0f
);
892 static void Mix32To16_SIMD(SWORD
* dste
,SLONG
* srce
,NATIVE count
)
896 while(!IS_ALIGNED_16(dste
) || !IS_ALIGNED_16(srce
))
899 EXTRACT_SAMPLE(x1
,16);
900 CHECK_SAMPLE(x1
,32768);
911 for(count
>>=3;count
;count
--)
914 EXTRACT_SAMPLE_SIMD_16(srce
, x1
); // Load 4 samples
915 EXTRACT_SAMPLE_SIMD_16(srce
+4, x2
); // Load 4 samples
916 PUT_SAMPLE_SIMD_W(dste
, x1
, x2
); // Store 8 samples
922 Mix32To16(dste
, srce
, remain
);
925 // Mix 32bit input to 8bit. 128 samples per iteration
927 static void Mix32To8_SIMD(SBYTE
* dste
,SLONG
* srce
,NATIVE count
)
931 while(!IS_ALIGNED_16(dste
) || !IS_ALIGNED_16(srce
))
934 EXTRACT_SAMPLE(x1
,8);
935 CHECK_SAMPLE(x1
,128);
946 for(count
>>=4;count
;count
--) {
947 simd_m128i x1
,x2
,x3
,x4
;
948 EXTRACT_SAMPLE_SIMD_8(srce
, x1
); // Load 4 samples
949 EXTRACT_SAMPLE_SIMD_8(srce
+4, x2
); // Load 4 samples
950 EXTRACT_SAMPLE_SIMD_8(srce
+8, x3
); // Load 4 samples
951 EXTRACT_SAMPLE_SIMD_8(srce
+12, x4
); // Load 4 samples
952 PUT_SAMPLE_SIMD_B(dste
, x1
, x2
, x3
, x4
); // Store 16 samples
957 Mix32To8(dste
, srce
, remain
);
964 static void AddChannel(SLONG
* ptr
,NATIVE todo
)
969 if(!(s
=Samples
[vnf
->handle
])) {
970 vnf
->current
= vnf
->active
= 0;
974 /* update the 'current' index so the sample loops, or stops playing if it
975 reached the end of the sample */
979 if(vnf
->flags
& SF_REVERSE
) {
980 /* The sample is playing in reverse */
981 if((vnf
->flags
&SF_LOOP
)&&(vnf
->current
<idxlpos
)) {
982 /* the sample is looping and has reached the loopstart index */
983 if(vnf
->flags
& SF_BIDI
) {
984 /* sample is doing bidirectional loops, so 'bounce' the
985 current index against the idxlpos */
986 vnf
->current
= idxlpos
+(idxlpos
-vnf
->current
);
987 vnf
->flags
&= ~SF_REVERSE
;
988 vnf
->increment
= -vnf
->increment
;
990 /* normal backwards looping, so set the current position to
992 vnf
->current
=idxlend
-(idxlpos
-vnf
->current
);
994 /* the sample is not looping, so check if it reached index 0 */
995 if(vnf
->current
< 0) {
996 /* playing index reached 0, so stop playing this sample */
997 vnf
->current
= vnf
->active
= 0;
1002 /* The sample is playing forward */
1003 if((vnf
->flags
& SF_LOOP
) &&
1004 (vnf
->current
>= idxlend
)) {
1005 /* the sample is looping, check the loopend index */
1006 if(vnf
->flags
& SF_BIDI
) {
1007 /* sample is doing bidirectional loops, so 'bounce' the
1008 current index against the idxlend */
1009 vnf
->flags
|= SF_REVERSE
;
1010 vnf
->increment
= -vnf
->increment
;
1011 vnf
->current
= idxlend
-(vnf
->current
-idxlend
);
1013 /* normal backwards looping, so set the current position
1015 vnf
->current
=idxlpos
+(vnf
->current
-idxlend
);
1017 /* sample is not looping, so check if it reached the last
1019 if(vnf
->current
>= idxsize
) {
1020 /* yes, so stop playing this sample */
1021 vnf
->current
= vnf
->active
= 0;
1027 end
=(vnf
->flags
&SF_REVERSE
)?(vnf
->flags
&SF_LOOP
)?idxlpos
:0:
1028 (vnf
->flags
&SF_LOOP
)?idxlend
:idxsize
;
1030 /* if the sample is not blocked... */
1031 if((end
==vnf
->current
)||(!vnf
->increment
))
1034 done
=MIN((end
-vnf
->current
)/vnf
->increment
+1,todo
);
1043 endpos
=vnf
->current
+done
*vnf
->increment
;
1046 #ifndef NATIVE_64BIT_INT
1047 /* use the 32 bit mixers as often as we can (they're much faster) */
1048 if((vnf
->current
<0x7fffffff)&&(endpos
<0x7fffffff)) {
1049 if((md_mode
& DMODE_INTERP
)) {
1050 if(vc_mode
& DMODE_STEREO
) {
1051 if((vnf
->pan
==PAN_SURROUND
)&&(md_mode
&DMODE_SURROUND
))
1052 vnf
->current
=Mix32SurroundInterp
1053 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1055 vnf
->current
=Mix32StereoInterp
1056 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1058 vnf
->current
=Mix32MonoInterp
1059 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1060 } else if(vc_mode
& DMODE_STEREO
) {
1061 if((vnf
->pan
==PAN_SURROUND
)&&(md_mode
&DMODE_SURROUND
))
1062 vnf
->current
=Mix32SurroundNormal
1063 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1066 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
1067 if (md_mode
& DMODE_SIMDMIXER
)
1068 vnf
->current
=MixSIMDStereoNormal
1069 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1073 vnf
->current
=Mix32StereoNormal
1074 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1077 vnf
->current
=Mix32MonoNormal
1078 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1082 if((md_mode
& DMODE_INTERP
)) {
1083 if(vc_mode
& DMODE_STEREO
) {
1084 if((vnf
->pan
==PAN_SURROUND
)&&(md_mode
&DMODE_SURROUND
))
1085 vnf
->current
=MixSurroundInterp
1086 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1088 vnf
->current
=MixStereoInterp
1089 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1091 vnf
->current
=MixMonoInterp
1092 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1093 } else if(vc_mode
& DMODE_STEREO
) {
1094 if((vnf
->pan
==PAN_SURROUND
)&&(md_mode
&DMODE_SURROUND
))
1095 vnf
->current
=MixSurroundNormal
1096 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1099 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
1100 if (md_mode
& DMODE_SIMDMIXER
)
1101 vnf
->current
=MixSIMDStereoNormal
1102 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1106 vnf
->current
=MixStereoNormal
1107 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1110 vnf
->current
=MixMonoNormal
1111 (s
,ptr
,vnf
->current
,vnf
->increment
,done
);
1114 /* update sample position */
1115 vnf
->current
=endpos
;
1118 ptr
+=(vc_mode
& DMODE_STEREO
)?(done
<<1):done
;
1123 #include "virtch_common.c"
1126 void VC1_WriteSamples(SBYTE
* buf
,ULONG todo
)
1128 int left
,portion
=0,count
;
1134 if(vc_mode
& DMODE_SOFT_MUSIC
) md_player();
1135 tickleft
=(md_mixfreq
*125L)/(md_bpm
*50L);
1137 left
= MIN(tickleft
, todo
);
1141 buf
+= samples2bytes(left
);
1144 portion
= MIN(left
, samplesthatfit
);
1145 count
= (vc_mode
& DMODE_STEREO
)?(portion
<<1):portion
;
1146 memset(vc_tickbuf
, 0, count
<<2);
1147 for(t
=0;t
<vc_softchn
;t
++) {
1151 vnf
->current
=((SLONGLONG
)vnf
->start
)<<FRACBITS
;
1156 if(!vnf
->frq
) vnf
->active
= 0;
1159 vnf
->increment
=((SLONGLONG
)(vnf
->frq
<<FRACBITS
))/md_mixfreq
;
1160 if(vnf
->flags
&SF_REVERSE
) vnf
->increment
=-vnf
->increment
;
1161 vol
= vnf
->vol
; pan
= vnf
->pan
;
1163 vnf
->oldlvol
=vnf
->lvolsel
;vnf
->oldrvol
=vnf
->rvolsel
;
1164 if(vc_mode
& DMODE_STEREO
) {
1165 if(pan
!= PAN_SURROUND
) {
1166 vnf
->lvolsel
=(vol
*(PAN_RIGHT
-pan
))>>8;
1167 vnf
->rvolsel
=(vol
*pan
)>>8;
1169 vnf
->lvolsel
=vnf
->rvolsel
=vol
/2;
1173 idxsize
= (vnf
->size
)? ((SLONGLONG
)vnf
->size
<< FRACBITS
)-1 : 0;
1174 idxlend
= (vnf
->repend
)? ((SLONGLONG
)vnf
->repend
<< FRACBITS
)-1 : 0;
1175 idxlpos
= (SLONGLONG
)vnf
->reppos
<< FRACBITS
;
1176 AddChannel(vc_tickbuf
, portion
);
1180 if(md_mode
& DMODE_NOISEREDUCTION
) {
1181 MixLowPass(vc_tickbuf
, portion
);
1185 if(md_reverb
>15) md_reverb
=15;
1186 MixReverb(vc_tickbuf
, portion
);
1190 vc_callback((unsigned char*)vc_tickbuf
, portion
);
1194 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
1195 if (md_mode
& DMODE_SIMDMIXER
)
1197 if(vc_mode
& DMODE_FLOAT
)
1198 Mix32ToFP_SIMD((float*) buffer
, vc_tickbuf
, count
);
1199 else if(vc_mode
& DMODE_16BITS
)
1200 Mix32To16_SIMD((SWORD
*) buffer
, vc_tickbuf
, count
);
1202 Mix32To8_SIMD((SBYTE
*) buffer
, vc_tickbuf
, count
);
1207 if(vc_mode
& DMODE_FLOAT
)
1208 Mix32ToFP((float*) buffer
, vc_tickbuf
, count
);
1209 else if(vc_mode
& DMODE_16BITS
)
1210 Mix32To16((SWORD
*) buffer
, vc_tickbuf
, count
);
1212 Mix32To8((SBYTE
*) buffer
, vc_tickbuf
, count
);
1214 buffer
+= samples2bytes(portion
);
1224 //if (md_mode&DMODE_HQMIXER)
1225 // return VC2_Init();
1227 if(!(Samples
=(SWORD
**)MikMod_calloc(MAXSAMPLEHANDLES
,sizeof(SWORD
*)))) {
1228 _mm_errno
= MMERR_INITIALIZING_MIXER
;
1232 if(!(vc_tickbuf
=(SLONG
*)MikMod_malloc((TICKLSIZE
+32)*sizeof(SLONG
)))) {
1233 _mm_errno
= MMERR_INITIALIZING_MIXER
;
1237 MixReverb
=(md_mode
&DMODE_STEREO
)?MixReverb_Stereo
:MixReverb_Normal
;
1238 MixLowPass
=(md_mode
&DMODE_STEREO
)?MixLowPass_Stereo
:MixLowPass_Normal
;
1243 int VC1_PlayStart(void)
1245 samplesthatfit
=TICKLSIZE
;
1246 if(vc_mode
& DMODE_STEREO
) samplesthatfit
>>= 1;
1249 RVc1
= (5000L * md_mixfreq
) / REVERBERATION
;
1250 RVc2
= (5078L * md_mixfreq
) / REVERBERATION
;
1251 RVc3
= (5313L * md_mixfreq
) / REVERBERATION
;
1252 RVc4
= (5703L * md_mixfreq
) / REVERBERATION
;
1253 RVc5
= (6250L * md_mixfreq
) / REVERBERATION
;
1254 RVc6
= (6953L * md_mixfreq
) / REVERBERATION
;
1255 RVc7
= (7813L * md_mixfreq
) / REVERBERATION
;
1256 RVc8
= (8828L * md_mixfreq
) / REVERBERATION
;
1258 if(!(RVbufL1
=(SLONG
*)MikMod_calloc((RVc1
+1),sizeof(SLONG
)))) return 1;
1259 if(!(RVbufL2
=(SLONG
*)MikMod_calloc((RVc2
+1),sizeof(SLONG
)))) return 1;
1260 if(!(RVbufL3
=(SLONG
*)MikMod_calloc((RVc3
+1),sizeof(SLONG
)))) return 1;
1261 if(!(RVbufL4
=(SLONG
*)MikMod_calloc((RVc4
+1),sizeof(SLONG
)))) return 1;
1262 if(!(RVbufL5
=(SLONG
*)MikMod_calloc((RVc5
+1),sizeof(SLONG
)))) return 1;
1263 if(!(RVbufL6
=(SLONG
*)MikMod_calloc((RVc6
+1),sizeof(SLONG
)))) return 1;
1264 if(!(RVbufL7
=(SLONG
*)MikMod_calloc((RVc7
+1),sizeof(SLONG
)))) return 1;
1265 if(!(RVbufL8
=(SLONG
*)MikMod_calloc((RVc8
+1),sizeof(SLONG
)))) return 1;
1267 if(!(RVbufR1
=(SLONG
*)MikMod_calloc((RVc1
+1),sizeof(SLONG
)))) return 1;
1268 if(!(RVbufR2
=(SLONG
*)MikMod_calloc((RVc2
+1),sizeof(SLONG
)))) return 1;
1269 if(!(RVbufR3
=(SLONG
*)MikMod_calloc((RVc3
+1),sizeof(SLONG
)))) return 1;
1270 if(!(RVbufR4
=(SLONG
*)MikMod_calloc((RVc4
+1),sizeof(SLONG
)))) return 1;
1271 if(!(RVbufR5
=(SLONG
*)MikMod_calloc((RVc5
+1),sizeof(SLONG
)))) return 1;
1272 if(!(RVbufR6
=(SLONG
*)MikMod_calloc((RVc6
+1),sizeof(SLONG
)))) return 1;
1273 if(!(RVbufR7
=(SLONG
*)MikMod_calloc((RVc7
+1),sizeof(SLONG
)))) return 1;
1274 if(!(RVbufR8
=(SLONG
*)MikMod_calloc((RVc8
+1),sizeof(SLONG
)))) return 1;
1280 void VC1_PlayStop(void)
1282 if(RVbufL1
) MikMod_free(RVbufL1
);
1283 if(RVbufL2
) MikMod_free(RVbufL2
);
1284 if(RVbufL3
) MikMod_free(RVbufL3
);
1285 if(RVbufL4
) MikMod_free(RVbufL4
);
1286 if(RVbufL5
) MikMod_free(RVbufL5
);
1287 if(RVbufL6
) MikMod_free(RVbufL6
);
1288 if(RVbufL7
) MikMod_free(RVbufL7
);
1289 if(RVbufL8
) MikMod_free(RVbufL8
);
1290 RVbufL1
=RVbufL2
=RVbufL3
=RVbufL4
=RVbufL5
=RVbufL6
=RVbufL7
=RVbufL8
=NULL
;
1291 if(RVbufR1
) MikMod_free(RVbufR1
);
1292 if(RVbufR2
) MikMod_free(RVbufR2
);
1293 if(RVbufR3
) MikMod_free(RVbufR3
);
1294 if(RVbufR4
) MikMod_free(RVbufR4
);
1295 if(RVbufR5
) MikMod_free(RVbufR5
);
1296 if(RVbufR6
) MikMod_free(RVbufR6
);
1297 if(RVbufR7
) MikMod_free(RVbufR7
);
1298 if(RVbufR8
) MikMod_free(RVbufR8
);
1299 RVbufR1
=RVbufR2
=RVbufR3
=RVbufR4
=RVbufR5
=RVbufR6
=RVbufR7
=RVbufR8
=NULL
;
1302 int VC1_SetNumVoices(void)
1306 if(!(vc_softchn
=md_softchn
)) return 0;
1308 if(vinf
) MikMod_free(vinf
);
1309 if(!(vinf
= MikMod_calloc(sizeof(VINFO
),vc_softchn
))) return 1;
1311 for(t
=0;t
<vc_softchn
;t
++) {
1313 vinf
[t
].pan
=(t
&1)?PAN_LEFT
:PAN_RIGHT
;