maemo-rb.git: apps/plugins/mikmod/virtch.c
1 /* MikMod sound library
2 (c) 1998, 1999, 2000, 2001, 2002 Miodrag Vallat and others - see file
3 AUTHORS for complete list.
5 This library is free software; you can redistribute it and/or modify
6 it under the terms of the GNU Library General Public License as
7 published by the Free Software Foundation; either version 2 of
8 the License, or (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
18 02111-1307, USA.
21 /*==============================================================================
23 $Id: virtch.c,v 1.4 2005/05/18 13:42:23 raphassenat Exp $
25 Sample mixing routines, using a 32 bit mixing buffer.
27 ==============================================================================*/
31 Optional features include:
32 (a) 4-step reverb (for 16 bit output only)
33 (b) Interpolation of sample data during mixing
34 (c) Dolby Surround Sound
36 #if 0
37 #include <assert.h>
38 #endif
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
44 #include <stddef.h>
45 #ifdef HAVE_MEMORY_H
46 #include <memory.h>
47 #endif
48 #include <string.h>
50 #include "mikmod_internals.h"
51 #include "mikmod.h"
54 Constant definitions
55 ====================
57 BITSHIFT
58 Controls the maximum volume of the sound output. All data is shifted
59 right by BITSHIFT after being mixed. Higher values result in quieter
60 sound and less chance of distortion.
62 REVERBERATION
63 Controls the duration of the reverb. Larger values represent a shorter
64 reverb loop. Smaller values extend the reverb but can result in more of
65 an echo-ish sound.
69 #define BITSHIFT 9
70 #define REVERBERATION 110000L
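/* Illustrative numbers: with REVERBERATION = 110000 and md_mixfreq = 44100 Hz,
   VC1_PlayStart below sizes the first reverb delay line as
   RVc1 = (5000L * 44100) / 110000 = 2004 samples, i.e. roughly 45 ms;
   a larger REVERBERATION value therefore means proportionally shorter delays. */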
72 #define FRACBITS 11
73 #define FRACMASK ((1L<<FRACBITS)-1L)
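/* Voice positions are kept in fixed point: index >> FRACBITS is the integer
   sample offset and index & FRACMASK is the fractional part (in 1/2048ths of
   a sample) used by the interpolating mixers. */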
75 #define TICKLSIZE 8192
76 #define TICKWSIZE (TICKLSIZE<<1)
77 #define TICKBSIZE (TICKWSIZE<<1)
79 #define CLICK_SHIFT 6
80 #define CLICK_BUFFER (1L<<CLICK_SHIFT)
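/* De-click ramp: while a voice's rampvol counter is non-zero, the *Interp
   mixers blend old and new volume as
       ((vol << CLICK_SHIFT) + oldvol * rampvol) >> CLICK_SHIFT,
   so the ramp presumably spans CLICK_BUFFER (64) samples. */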
82 #ifndef MIN
83 #define MIN(a,b) (((a)<(b)) ? (a) : (b))
84 #endif
86 typedef struct VINFO {
87 UBYTE kick; /* =1 -> sample has to be restarted */
88 UBYTE active; /* =1 -> sample is playing */
89 UWORD flags; /* 16/8 bits looping/one-shot */
90 SWORD handle; /* identifies the sample */
91 ULONG start; /* start index */
92 ULONG size; /* samplesize */
93 ULONG reppos; /* loop start */
94 ULONG repend; /* loop end */
95 ULONG frq; /* current frequency */
96 int vol; /* current volume */
97 int pan; /* current panning position */
99 int rampvol;
100 int lvolsel,rvolsel; /* Volume factor in range 0-255 */
101 int oldlvol,oldrvol;
103 SLONGLONG current; /* current index in the sample */
104 SLONGLONG increment; /* increment value */
105 } VINFO;
107 static SWORD **Samples;
108 static VINFO *vinf=NULL,*vnf;
109 static long tickleft,samplesthatfit,vc_memory=0;
110 static int vc_softchn;
111 static SLONGLONG idxsize,idxlpos,idxlend;
112 static SLONG *vc_tickbuf=NULL;
113 static UWORD vc_mode;
115 /* Reverb control variables */
117 static int RVc1, RVc2, RVc3, RVc4, RVc5, RVc6, RVc7, RVc8;
118 static ULONG RVRindex;
120 /* For Mono or Left Channel */
121 static SLONG *RVbufL1=NULL,*RVbufL2=NULL,*RVbufL3=NULL,*RVbufL4=NULL,
122 *RVbufL5=NULL,*RVbufL6=NULL,*RVbufL7=NULL,*RVbufL8=NULL;
124 /* For Stereo only (Right Channel) */
125 static SLONG *RVbufR1=NULL,*RVbufR2=NULL,*RVbufR3=NULL,*RVbufR4=NULL,
126 *RVbufR5=NULL,*RVbufR6=NULL,*RVbufR7=NULL,*RVbufR8=NULL;
128 #ifdef NATIVE_64BIT_INT
129 #define NATIVE SLONGLONG
130 #else
131 #define NATIVE SLONG
132 #endif
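/* NATIVE is the widest integer type that is still register-sized on the
   platform; it is used for sample counts, while SLONGLONG keeps full 64 bit
   precision for voice positions even on 32 bit builds. */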
133 #if defined HAVE_SSE2 || defined HAVE_ALTIVEC
135 static size_t MixSIMDMonoNormal(const SWORD* srce,SLONG* dest,size_t index, size_t increment,size_t todo)
137 // TODO:
138 SWORD sample;
139 SLONG lvolsel = vnf->lvolsel;
141 while(todo--) {
142 sample = srce[index >> FRACBITS];
143 index += increment;
145 *dest++ += lvolsel * sample;
147 return index;
150 static size_t MixSIMDStereoNormal(const SWORD* srce, SLONG* dest, size_t index, size_t increment,size_t todo)
152 SWORD vol[8] = {vnf->lvolsel, vnf->rvolsel};
153 SWORD sample;
154 SLONG remain = todo;
156 // Dest can be misaligned ...
157 while(!IS_ALIGNED_16(dest)) {
158 sample=srce[(index += increment) >> FRACBITS];
159 *dest++ += vol[0] * sample;
160 *dest++ += vol[1] * sample;
161 todo--;
164 // Srce is always aligned ...
166 #if defined HAVE_SSE2
167 remain = todo&3;
169 __m128i v0 = _mm_set_epi16(0, vol[1],
170 0, vol[0],
171 0, vol[1],
172 0, vol[0]);
173 for(todo>>=2;todo; todo--)
175 SWORD s0 = srce[(index += increment) >> FRACBITS];
176 SWORD s1 = srce[(index += increment) >> FRACBITS];
177 SWORD s2 = srce[(index += increment) >> FRACBITS];
178 SWORD s3 = srce[(index += increment) >> FRACBITS];
179 __m128i v1 = _mm_set_epi16(0, s1, 0, s1, 0, s0, 0, s0);
180 __m128i v2 = _mm_set_epi16(0, s3, 0, s3, 0, s2, 0, s2);
181 __m128i v3 = _mm_load_si128((__m128i*)(dest+0));
182 __m128i v4 = _mm_load_si128((__m128i*)(dest+4));
183 _mm_store_si128((__m128i*)(dest+0), _mm_add_epi32(v3, _mm_madd_epi16(v0, v1)));
184 _mm_store_si128((__m128i*)(dest+4), _mm_add_epi32(v4, _mm_madd_epi16(v0, v2)));
185 dest+=8;
189 #elif defined HAVE_ALTIVEC
190 remain = todo&3;
192 vector signed short r0 = vec_ld(0, vol);
193 vector signed short v0 = vec_perm(r0, r0, (vector unsigned char)(0, 1, // l
194 0, 1, // l
195 2, 3, // r
196 2, 3, // r
197 0, 1, // l
198 0, 1, // l
199 2, 3, // r
200 2, 3 // r
202 SWORD s[8];
204 for(todo>>=2;todo; todo--)
206 // Gather four input samples
207 s[0] = srce[(index += increment) >> FRACBITS];
208 s[1] = srce[(index += increment) >> FRACBITS];
209 s[2] = srce[(index += increment) >> FRACBITS];
210 s[3] = srce[(index += increment) >> FRACBITS];
211 s[4] = 0;
213 vector short int r1 = vec_ld(0, s);
214 vector signed short v1 = vec_perm(r1, r1, (vector unsigned char)(0*2, 0*2+1, // s0
215 4*2, 4*2+1, // 0
216 0*2, 0*2+1, // s0
217 4*2, 4*2+1, // 0
218 1*2, 1*2+1, // s1
219 4*2, 4*2+1, // 0
220 1*2, 1*2+1, // s1
221 4*2, 4*2+1 // 0
224 vector signed short v2 = vec_perm(r1, r1, (vector unsigned char)(2*2, 2*2+1, // s2
225 4*2, 4*2+1, // 0
226 2*2, 2*2+1, // s2
227 4*2, 4*2+1, // 0
228 3*2, 3*2+1, // s3
229 4*2, 4*2+1, // 0
230 3*2, 3*2+1, // s3
231 4*2, 4*2+1 // 0
233 vector signed int v3 = vec_ld(0, dest);
234 vector signed int v4 = vec_ld(0, dest + 4);
235 vector signed int v5 = vec_mule(v0, v1);
236 vector signed int v6 = vec_mule(v0, v2);
238 vec_st(vec_add(v3, v5), 0, dest);
239 vec_st(vec_add(v4, v6), 0x10, dest);
241 dest+=8;
244 #endif // HAVE_ALTIVEC
246 // Remaining bits ...
247 while(remain--) {
248 sample=srce[(index += increment) >> FRACBITS];
250 *dest++ += vol[0] * sample;
251 *dest++ += vol[1] * sample;
253 return index;
255 #endif
257 /*========== 32 bit sample mixers - only for 32 bit platforms */
258 #ifndef NATIVE_64BIT_INT
260 static SLONG Mix32MonoNormal(const SWORD* srce,SLONG* dest,SLONG index,SLONG increment,SLONG todo)
262 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
263 if (md_mode & DMODE_SIMDMIXER)
265 return MixSIMDMonoNormal(srce, dest, index, increment, todo);
267 else
268 #endif
270 SWORD sample;
271 SLONG lvolsel = vnf->lvolsel;
273 while(todo--) {
274 sample = srce[index >> FRACBITS];
275 index += increment;
277 *dest++ += lvolsel * sample;
280 return index;
283 // FIXME: This mixer should also work on 64-bit platforms.
284 // Hint: replace the SLONG / SLONGLONG mix with size_t.
285 static SLONG Mix32StereoNormal(const SWORD* srce,SLONG* dest,SLONG index,SLONG increment,SLONG todo)
287 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
288 if (md_mode & DMODE_SIMDMIXER)
290 return MixSIMDStereoNormal(srce, dest, index, increment, todo);
292 else
293 #endif
295 SWORD sample;
296 SLONG lvolsel = vnf->lvolsel;
297 SLONG rvolsel = vnf->rvolsel;
299 while(todo--) {
300 sample=srce[(index += increment) >> FRACBITS];
302 *dest++ += lvolsel * sample;
303 *dest++ += rvolsel * sample;
306 return index;
310 static SLONG Mix32SurroundNormal(const SWORD* srce,SLONG* dest,SLONG index,SLONG increment,SLONG todo)
312 SWORD sample;
313 SLONG lvolsel = vnf->lvolsel;
314 SLONG rvolsel = vnf->rvolsel;
316 if (lvolsel>=rvolsel) {
317 while(todo--) {
318 sample = srce[index >> FRACBITS];
319 index += increment;
321 *dest++ += lvolsel*sample;
322 *dest++ -= lvolsel*sample;
324 } else {
325 while(todo--) {
326 sample = srce[index >> FRACBITS];
327 index += increment;
329 *dest++ -= rvolsel*sample;
330 *dest++ += rvolsel*sample;
333 return index;
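/* The *Interp mixers below resample with linear interpolation,
       sample = s[i] + ((s[i+1] - s[i]) * frac >> FRACBITS)
   where i = index >> FRACBITS and frac = index & FRACMASK, and, while
   vnf->rampvol is non-zero, crossfade from the previous volume
   (oldlvol/oldrvol) to the current one to suppress clicks. */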
336 static SLONG Mix32MonoInterp(const SWORD* srce,SLONG* dest,SLONG index,SLONG increment,SLONG todo)
338 SLONG sample;
339 SLONG lvolsel = vnf->lvolsel;
340 SLONG rampvol = vnf->rampvol;
342 if (rampvol) {
343 SLONG oldlvol = vnf->oldlvol - lvolsel;
344 while(todo--) {
345 sample=(SLONG)srce[index>>FRACBITS]+
346 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
347 *(index&FRACMASK)>>FRACBITS);
348 index += increment;
350 *dest++ += ((lvolsel << CLICK_SHIFT) + oldlvol * rampvol)
351 * sample >> CLICK_SHIFT;
352 if (!--rampvol)
353 break;
355 vnf->rampvol = rampvol;
356 if (todo < 0)
357 return index;
360 while(todo--) {
361 sample=(SLONG)srce[index>>FRACBITS]+
362 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
363 *(index&FRACMASK)>>FRACBITS);
364 index += increment;
366 *dest++ += lvolsel * sample;
368 return index;
371 static SLONG Mix32StereoInterp(const SWORD* srce,SLONG* dest,SLONG index,SLONG increment,SLONG todo)
373 SLONG sample;
374 SLONG lvolsel = vnf->lvolsel;
375 SLONG rvolsel = vnf->rvolsel;
376 SLONG rampvol = vnf->rampvol;
378 if (rampvol) {
379 SLONG oldlvol = vnf->oldlvol - lvolsel;
380 SLONG oldrvol = vnf->oldrvol - rvolsel;
381 while(todo--) {
382 sample=(SLONG)srce[index>>FRACBITS]+
383 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
384 *(index&FRACMASK)>>FRACBITS);
385 index += increment;
387 *dest++ += ((lvolsel << CLICK_SHIFT) + oldlvol * rampvol)
388 * sample >> CLICK_SHIFT;
389 *dest++ += ((rvolsel << CLICK_SHIFT) + oldrvol * rampvol)
390 * sample >> CLICK_SHIFT;
391 if (!--rampvol)
392 break;
394 vnf->rampvol = rampvol;
395 if (todo < 0)
396 return index;
399 while(todo--) {
400 sample=(SLONG)srce[index>>FRACBITS]+
401 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
402 *(index&FRACMASK)>>FRACBITS);
403 index += increment;
405 *dest++ += lvolsel * sample;
406 *dest++ += rvolsel * sample;
408 return index;
411 static SLONG Mix32SurroundInterp(const SWORD* srce,SLONG* dest,SLONG index,SLONG increment,SLONG todo)
413 SLONG sample;
414 SLONG lvolsel = vnf->lvolsel;
415 SLONG rvolsel = vnf->rvolsel;
416 SLONG rampvol = vnf->rampvol;
417 SLONG oldvol, vol;
419 if (lvolsel >= rvolsel) {
420 vol = lvolsel;
421 oldvol = vnf->oldlvol;
422 } else {
423 vol = rvolsel;
424 oldvol = vnf->oldrvol;
427 if (rampvol) {
428 oldvol -= vol;
429 while(todo--) {
430 sample=(SLONG)srce[index>>FRACBITS]+
431 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
432 *(index&FRACMASK)>>FRACBITS);
433 index += increment;
435 sample=((vol << CLICK_SHIFT) + oldvol * rampvol)
436 * sample >> CLICK_SHIFT;
437 *dest++ += sample;
438 *dest++ -= sample;
440 if (!--rampvol)
441 break;
443 vnf->rampvol = rampvol;
444 if (todo < 0)
445 return index;
448 while(todo--) {
449 sample=(SLONG)srce[index>>FRACBITS]+
450 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
451 *(index&FRACMASK)>>FRACBITS);
452 index += increment;
454 *dest++ += vol*sample;
455 *dest++ -= vol*sample;
457 return index;
459 #endif
461 /*========== 64 bit sample mixers - all platforms */
463 static SLONGLONG MixMonoNormal(const SWORD* srce,SLONG* dest,SLONGLONG index,SLONGLONG increment,SLONG todo)
465 SWORD sample;
466 SLONG lvolsel = vnf->lvolsel;
468 while(todo--) {
469 sample = srce[index >> FRACBITS];
470 index += increment;
472 *dest++ += lvolsel * sample;
474 return index;
477 static SLONGLONG MixStereoNormal(const SWORD* srce,SLONG* dest,SLONGLONG index,SLONGLONG increment,SLONG todo)
479 SWORD sample;
480 SLONG lvolsel = vnf->lvolsel;
481 SLONG rvolsel = vnf->rvolsel;
483 while(todo--) {
484 sample=srce[index >> FRACBITS];
485 index += increment;
487 *dest++ += lvolsel * sample;
488 *dest++ += rvolsel * sample;
490 return index;
493 static SLONGLONG MixSurroundNormal(const SWORD* srce,SLONG* dest,SLONGLONG index,SLONGLONG increment,SLONG todo)
495 SWORD sample;
496 SLONG lvolsel = vnf->lvolsel;
497 SLONG rvolsel = vnf->rvolsel;
499 if(vnf->lvolsel>=vnf->rvolsel) {
500 while(todo--) {
501 sample = srce[index >> FRACBITS];
502 index += increment;
504 *dest++ += lvolsel*sample;
505 *dest++ -= lvolsel*sample;
507 } else {
508 while(todo--) {
509 sample = srce[index >> FRACBITS];
510 index += increment;
512 *dest++ -= rvolsel*sample;
513 *dest++ += rvolsel*sample;
516 return index;
519 static SLONGLONG MixMonoInterp(const SWORD* srce,SLONG* dest,SLONGLONG index,SLONGLONG increment,SLONG todo)
521 SLONG sample;
522 SLONG lvolsel = vnf->lvolsel;
523 SLONG rampvol = vnf->rampvol;
525 if (rampvol) {
526 SLONG oldlvol = vnf->oldlvol - lvolsel;
527 while(todo--) {
528 sample=(SLONG)srce[index>>FRACBITS]+
529 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
530 *(index&FRACMASK)>>FRACBITS);
531 index += increment;
533 *dest++ += ((lvolsel << CLICK_SHIFT) + oldlvol * rampvol)
534 * sample >> CLICK_SHIFT;
535 if (!--rampvol)
536 break;
538 vnf->rampvol = rampvol;
539 if (todo < 0)
540 return index;
543 while(todo--) {
544 sample=(SLONG)srce[index>>FRACBITS]+
545 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
546 *(index&FRACMASK)>>FRACBITS);
547 index += increment;
549 *dest++ += lvolsel * sample;
551 return index;
554 static SLONGLONG MixStereoInterp(const SWORD* srce,SLONG* dest,SLONGLONG index,SLONGLONG increment,SLONG todo)
556 SLONG sample;
557 SLONG lvolsel = vnf->lvolsel;
558 SLONG rvolsel = vnf->rvolsel;
559 SLONG rampvol = vnf->rampvol;
561 if (rampvol) {
562 SLONG oldlvol = vnf->oldlvol - lvolsel;
563 SLONG oldrvol = vnf->oldrvol - rvolsel;
564 while(todo--) {
565 sample=(SLONG)srce[index>>FRACBITS]+
566 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
567 *(index&FRACMASK)>>FRACBITS);
568 index += increment;
570 *dest++ +=((lvolsel << CLICK_SHIFT) + oldlvol * rampvol)
571 * sample >> CLICK_SHIFT;
572 *dest++ +=((rvolsel << CLICK_SHIFT) + oldrvol * rampvol)
573 * sample >> CLICK_SHIFT;
574 if (!--rampvol)
575 break;
577 vnf->rampvol = rampvol;
578 if (todo < 0)
579 return index;
582 while(todo--) {
583 sample=(SLONG)srce[index>>FRACBITS]+
584 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
585 *(index&FRACMASK)>>FRACBITS);
586 index += increment;
588 *dest++ += lvolsel * sample;
589 *dest++ += rvolsel * sample;
591 return index;
594 static SLONGLONG MixSurroundInterp(const SWORD* srce,SLONG* dest,SLONGLONG index,SLONGLONG increment,SLONG todo)
596 SLONG sample;
597 SLONG lvolsel = vnf->lvolsel;
598 SLONG rvolsel = vnf->rvolsel;
599 SLONG rampvol = vnf->rampvol;
600 SLONG oldvol, vol;
602 if (lvolsel >= rvolsel) {
603 vol = lvolsel;
604 oldvol = vnf->oldlvol;
605 } else {
606 vol = rvolsel;
607 oldvol = vnf->oldrvol;
610 if (rampvol) {
611 oldvol -= vol;
612 while(todo--) {
613 sample=(SLONG)srce[index>>FRACBITS]+
614 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
615 *(index&FRACMASK)>>FRACBITS);
616 index += increment;
618 sample=((vol << CLICK_SHIFT) + oldvol * rampvol)
619 * sample >> CLICK_SHIFT;
620 *dest++ += sample;
621 *dest++ -= sample;
622 if (!--rampvol)
623 break;
625 vnf->rampvol = rampvol;
626 if (todo < 0)
627 return index;
630 while(todo--) {
631 sample=(SLONG)srce[index>>FRACBITS]+
632 ((SLONG)(srce[(index>>FRACBITS)+1]-srce[index>>FRACBITS])
633 *(index&FRACMASK)>>FRACBITS);
634 index += increment;
636 *dest++ += vol*sample;
637 *dest++ -= vol*sample;
639 return index;
642 static void (*MixReverb)(SLONG* srce,NATIVE count);
644 /* Reverb macros */
645 #define COMPUTE_LOC(n) loc##n = RVRindex % RVc##n
646 #define COMPUTE_LECHO(n) RVbufL##n [loc##n ]=speedup+((ReverbPct*RVbufL##n [loc##n ])>>7)
647 #define COMPUTE_RECHO(n) RVbufR##n [loc##n ]=speedup+((ReverbPct*RVbufR##n [loc##n ])>>7)
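/* The reverb is a bank of eight feedback comb filters: each RVbufL#n /
   RVbufR#n is a circular delay line of RVc#n samples indexed via RVRindex,
   updated by COMPUTE_LECHO/COMPUTE_RECHO as
       buf[loc] = input/8 + (ReverbPct * buf[loc]) >> 7,
   and the eight outputs are summed into the signal with alternating signs. */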
649 static void MixReverb_Normal(SLONG* srce,NATIVE count)
651 unsigned int speedup;
652 int ReverbPct;
653 unsigned int loc1,loc2,loc3,loc4;
654 unsigned int loc5,loc6,loc7,loc8;
656 ReverbPct=58+(md_reverb<<2);
658 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
659 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
661 while(count--) {
662 /* Compute the left channel echo buffers */
663 speedup = *srce >> 3;
665 COMPUTE_LECHO(1); COMPUTE_LECHO(2); COMPUTE_LECHO(3); COMPUTE_LECHO(4);
666 COMPUTE_LECHO(5); COMPUTE_LECHO(6); COMPUTE_LECHO(7); COMPUTE_LECHO(8);
668 /* Prepare to compute actual finalized data */
669 RVRindex++;
671 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
672 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
674 /* left channel */
675 *srce++ +=RVbufL1[loc1]-RVbufL2[loc2]+RVbufL3[loc3]-RVbufL4[loc4]+
676 RVbufL5[loc5]-RVbufL6[loc6]+RVbufL7[loc7]-RVbufL8[loc8];
680 static void MixReverb_Stereo(SLONG* srce,NATIVE count)
682 unsigned int speedup;
683 int ReverbPct;
684 unsigned int loc1, loc2, loc3, loc4;
685 unsigned int loc5, loc6, loc7, loc8;
687 ReverbPct = 92+(md_reverb<<1);
689 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
690 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
692 while(count--) {
693 /* Compute the left channel echo buffers */
694 speedup = *srce >> 3;
696 COMPUTE_LECHO(1); COMPUTE_LECHO(2); COMPUTE_LECHO(3); COMPUTE_LECHO(4);
697 COMPUTE_LECHO(5); COMPUTE_LECHO(6); COMPUTE_LECHO(7); COMPUTE_LECHO(8);
699 /* Compute the right channel echo buffers */
700 speedup = srce[1] >> 3;
702 COMPUTE_RECHO(1); COMPUTE_RECHO(2); COMPUTE_RECHO(3); COMPUTE_RECHO(4);
703 COMPUTE_RECHO(5); COMPUTE_RECHO(6); COMPUTE_RECHO(7); COMPUTE_RECHO(8);
705 /* Prepare to compute actual finalized data */
706 RVRindex++;
708 COMPUTE_LOC(1); COMPUTE_LOC(2); COMPUTE_LOC(3); COMPUTE_LOC(4);
709 COMPUTE_LOC(5); COMPUTE_LOC(6); COMPUTE_LOC(7); COMPUTE_LOC(8);
711 /* left channel then right channel */
712 *srce++ +=RVbufL1[loc1]-RVbufL2[loc2]+RVbufL3[loc3]-RVbufL4[loc4]+
713 RVbufL5[loc5]-RVbufL6[loc6]+RVbufL7[loc7]-RVbufL8[loc8];
715 *srce++ +=RVbufR1[loc1]-RVbufR2[loc2]+RVbufR3[loc3]-RVbufR4[loc4]+
716 RVbufR5[loc5]-RVbufR6[loc6]+RVbufR7[loc7]-RVbufR8[loc8];
720 static void (*MixLowPass)(SLONG* srce,NATIVE count);
722 static int nLeftNR, nRightNR;
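/* The "noise reduction" filter is a simple averaging low-pass,
       out[n] = (in[n] + in[n-1]) / 2,
   with the previous half-sample kept per channel in nLeftNR / nRightNR. */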
724 static void MixLowPass_Stereo(SLONG* srce,NATIVE count)
726 int n1 = nLeftNR, n2 = nRightNR;
727 SLONG *pnr = srce;
728 int nr=count;
729 for (; nr; nr--)
731 int vnr = pnr[0] >> 1;
732 pnr[0] = vnr + n1;
733 n1 = vnr;
734 vnr = pnr[1] >> 1;
735 pnr[1] = vnr + n2;
736 n2 = vnr;
737 pnr += 2;
739 nLeftNR = n1;
740 nRightNR = n2;
743 static void MixLowPass_Normal(SLONG* srce,NATIVE count)
745 int n1 = nLeftNR;
746 SLONG *pnr = srce;
747 int nr=count;
748 for (; nr; nr--)
750 int vnr = pnr[0] >> 1;
751 pnr[0] = vnr + n1;
752 n1 = vnr;
753 pnr ++;
755 nLeftNR = n1;
758 /* shifting fudge factor for FP scaling, should be 0 < FP_SHIFT < BITSHIFT */
759 #define FP_SHIFT 4
761 /* Mixing macros */
762 #define EXTRACT_SAMPLE_FP(var,size) var=(*srce++>>(BITSHIFT-size)) * ((1.0f / 32768.0f) / (1 << size))
763 #define CHECK_SAMPLE_FP(var,bound) var=(var>bound)?bound:(var<-bound)?-bound:var
764 #define PUT_SAMPLE_FP(var) *dste++=var
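/* EXTRACT_SAMPLE_FP drops (BITSHIFT - FP_SHIFT) bits in integer arithmetic and
   scales the remainder by 1 / (32768 * 2^FP_SHIFT), an overall scale of
   1 / (32768 * 2^BITSHIFT); keeping FP_SHIFT bits for the float stage leaves
   headroom for CHECK_SAMPLE_FP to clip against +/-1.0. */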
766 static void Mix32ToFP(float* dste,const SLONG *srce,NATIVE count)
768 float x1,x2,x3,x4;
769 int remain;
771 remain=count&3;
772 for(count>>=2;count;count--) {
773 EXTRACT_SAMPLE_FP(x1,FP_SHIFT); EXTRACT_SAMPLE_FP(x2,FP_SHIFT);
774 EXTRACT_SAMPLE_FP(x3,FP_SHIFT); EXTRACT_SAMPLE_FP(x4,FP_SHIFT);
776 CHECK_SAMPLE_FP(x1,1.0f); CHECK_SAMPLE_FP(x2,1.0f);
777 CHECK_SAMPLE_FP(x3,1.0f); CHECK_SAMPLE_FP(x4,1.0f);
779 PUT_SAMPLE_FP(x1); PUT_SAMPLE_FP(x2);
780 PUT_SAMPLE_FP(x3); PUT_SAMPLE_FP(x4);
782 while(remain--) {
783 EXTRACT_SAMPLE_FP(x1,FP_SHIFT);
784 CHECK_SAMPLE_FP(x1,1.0f);
785 PUT_SAMPLE_FP(x1);
790 /* Mixing macros */
791 #define EXTRACT_SAMPLE(var,size) var=*srce++>>(BITSHIFT+16-size)
792 #define CHECK_SAMPLE(var,bound) var=(var>=bound)?bound-1:(var<-bound)?-bound:var
793 #define PUT_SAMPLE(var) *dste++=var
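/* EXTRACT_SAMPLE shifts the 32 bit accumulator down to the output width:
   >> BITSHIFT for 16 bit output and >> (BITSHIFT + 8) for 8 bit output.
   CHECK_SAMPLE clips to the signed range; Mix32To8 then adds a 128 bias
   because 8 bit output is unsigned. */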
795 static void Mix32To16(SWORD* dste,const SLONG *srce,NATIVE count)
797 SLONG x1,x2,x3,x4;
798 int remain;
800 remain=count&3;
801 for(count>>=2;count;count--) {
802 EXTRACT_SAMPLE(x1,16); EXTRACT_SAMPLE(x2,16);
803 EXTRACT_SAMPLE(x3,16); EXTRACT_SAMPLE(x4,16);
805 CHECK_SAMPLE(x1,32768); CHECK_SAMPLE(x2,32768);
806 CHECK_SAMPLE(x3,32768); CHECK_SAMPLE(x4,32768);
808 PUT_SAMPLE(x1); PUT_SAMPLE(x2); PUT_SAMPLE(x3); PUT_SAMPLE(x4);
810 while(remain--) {
811 EXTRACT_SAMPLE(x1,16);
812 CHECK_SAMPLE(x1,32768);
813 PUT_SAMPLE(x1);
817 static void Mix32To8(SBYTE* dste,const SLONG *srce,NATIVE count)
819 SWORD x1,x2,x3,x4;
820 int remain;
822 remain=count&3;
823 for(count>>=2;count;count--) {
824 EXTRACT_SAMPLE(x1,8); EXTRACT_SAMPLE(x2,8);
825 EXTRACT_SAMPLE(x3,8); EXTRACT_SAMPLE(x4,8);
827 CHECK_SAMPLE(x1,128); CHECK_SAMPLE(x2,128);
828 CHECK_SAMPLE(x3,128); CHECK_SAMPLE(x4,128);
830 PUT_SAMPLE(x1+128); PUT_SAMPLE(x2+128);
831 PUT_SAMPLE(x3+128); PUT_SAMPLE(x4+128);
833 while(remain--) {
834 EXTRACT_SAMPLE(x1,8);
835 CHECK_SAMPLE(x1,128);
836 PUT_SAMPLE(x1+128);
840 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
842 // Mix 32 bit input to floating point, 8 samples per SIMD iteration
843 // PC: ?, Mac OK
844 static void Mix32ToFP_SIMD(float* dste,SLONG* srce,NATIVE count)
846 int remain=count;
848 while(!IS_ALIGNED_16(dste) || !IS_ALIGNED_16(srce))
850 float x1;
851 EXTRACT_SAMPLE_FP(x1,FP_SHIFT);
852 CHECK_SAMPLE_FP(x1,1.0f);
853 PUT_SAMPLE_FP(x1);
854 count--;
855 if (!count)
857 return;
861 remain = count&7;
863 const float k = ((1.0f / 32768.0f) / (1 << FP_SHIFT));
864 simd_m128 x1, x2;
865 simd_m128 xk = LOAD_PS1_SIMD(&k); // Scale factor
867 for(count>>=3;count;count--) {
868 EXTRACT_SAMPLE_SIMD_F(srce, x1, FP_SHIFT, xk); // Load 4 samples
869 EXTRACT_SAMPLE_SIMD_F(srce+4, x2, FP_SHIFT, xk); // Load 4 samples
870 PUT_SAMPLE_SIMD_F(dste, x1); // Store 4 samples
871 PUT_SAMPLE_SIMD_F(dste+4, x2); // Store 4 samples
872 srce+=8;
873 dste+=8;
876 if (remain&4) {
877 EXTRACT_SAMPLE_SIMD_F(srce, x1, FP_SHIFT, xk); // Load 4 samples
878 PUT_SAMPLE_SIMD_F(dste, x1); // Store 4 samples
879 srce+=4;
880 dste+=4;
881 remain &= 3;
884 while(remain--) {
885 float x1;
886 EXTRACT_SAMPLE_FP(x1,FP_SHIFT);
887 CHECK_SAMPLE_FP(x1,1.0f);
888 PUT_SAMPLE_FP(x1);
891 // PC: Ok, Mac Ok
892 static void Mix32To16_SIMD(SWORD* dste,SLONG* srce,NATIVE count)
894 int remain = count;
896 while(!IS_ALIGNED_16(dste) || !IS_ALIGNED_16(srce))
898 SLONG x1;
899 EXTRACT_SAMPLE(x1,16);
900 CHECK_SAMPLE(x1,32768);
901 PUT_SAMPLE(x1);
902 count--;
903 if (!count)
905 return;
909 remain = count&7;
911 for(count>>=3;count;count--)
913 simd_m128i x1,x2;
914 EXTRACT_SAMPLE_SIMD_16(srce, x1); // Load 4 samples
915 EXTRACT_SAMPLE_SIMD_16(srce+4, x2); // Load 4 samples
916 PUT_SAMPLE_SIMD_W(dste, x1, x2); // Store 8 samples
917 srce+=8;
918 dste+=8;
921 if (remain)
922 Mix32To16(dste, srce, remain);
925 // Mix 32 bit input to 8 bit, 16 samples per SIMD iteration
926 // PC:OK, Mac: Ok
927 static void Mix32To8_SIMD(SBYTE* dste,SLONG* srce,NATIVE count)
929 int remain=count;
931 while(!IS_ALIGNED_16(dste) || !IS_ALIGNED_16(srce))
933 SWORD x1;
934 EXTRACT_SAMPLE(x1,8);
935 CHECK_SAMPLE(x1,128);
936 PUT_SAMPLE(x1+128);
937 count--;
938 if (!count)
940 return;
944 remain = count&15;
946 for(count>>=4;count;count--) {
947 simd_m128i x1,x2,x3,x4;
948 EXTRACT_SAMPLE_SIMD_8(srce, x1); // Load 4 samples
949 EXTRACT_SAMPLE_SIMD_8(srce+4, x2); // Load 4 samples
950 EXTRACT_SAMPLE_SIMD_8(srce+8, x3); // Load 4 samples
951 EXTRACT_SAMPLE_SIMD_8(srce+12, x4); // Load 4 samples
952 PUT_SAMPLE_SIMD_B(dste, x1, x2, x3, x4); // Store 16 samples
953 srce+=16;
954 dste+=16;
956 if (remain)
957 Mix32To8(dste, srce, remain);
960 #endif
964 static void AddChannel(SLONG* ptr,NATIVE todo)
966 SLONGLONG end,done;
967 SWORD *s;
969 if(!(s=Samples[vnf->handle])) {
970 vnf->current = vnf->active = 0;
971 return;
974 /* update the 'current' index so the sample loops, or stops playing if it
975 reached the end of the sample */
976 while(todo>0) {
977 SLONGLONG endpos;
979 if(vnf->flags & SF_REVERSE) {
980 /* The sample is playing in reverse */
981 if((vnf->flags&SF_LOOP)&&(vnf->current<idxlpos)) {
982 /* the sample is looping and has reached the loopstart index */
983 if(vnf->flags & SF_BIDI) {
984 /* sample is doing bidirectional loops, so 'bounce' the
985 current index against the idxlpos */
986 vnf->current = idxlpos+(idxlpos-vnf->current);
987 vnf->flags &= ~SF_REVERSE;
988 vnf->increment = -vnf->increment;
989 } else
990 /* normal backwards looping, so set the current position to
991 loopend index */
992 vnf->current=idxlend-(idxlpos-vnf->current);
993 } else {
994 /* the sample is not looping, so check if it reached index 0 */
995 if(vnf->current < 0) {
996 /* playing index reached 0, so stop playing this sample */
997 vnf->current = vnf->active = 0;
998 break;
1001 } else {
1002 /* The sample is playing forward */
1003 if((vnf->flags & SF_LOOP) &&
1004 (vnf->current >= idxlend)) {
1005 /* the sample is looping, check the loopend index */
1006 if(vnf->flags & SF_BIDI) {
1007 /* sample is doing bidirectional loops, so 'bounce' the
1008 current index against the idxlend */
1009 vnf->flags |= SF_REVERSE;
1010 vnf->increment = -vnf->increment;
1011 vnf->current = idxlend-(vnf->current-idxlend);
1012 } else
1013 /* normal forward looping, so wrap the current position
1014 back to the loopstart index */
1015 vnf->current=idxlpos+(vnf->current-idxlend);
1016 } else {
1017 /* sample is not looping, so check if it reached the last
1018 position */
1019 if(vnf->current >= idxsize) {
1020 /* yes, so stop playing this sample */
1021 vnf->current = vnf->active = 0;
1022 break;
1027 end=(vnf->flags&SF_REVERSE)?(vnf->flags&SF_LOOP)?idxlpos:0:
1028 (vnf->flags&SF_LOOP)?idxlend:idxsize;
1030 /* if the sample is not blocked... */
1031 if((end==vnf->current)||(!vnf->increment))
1032 done=0;
1033 else {
1034 done=MIN((end-vnf->current)/vnf->increment+1,todo);
1035 if(done<0) done=0;
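/* 'done' is how many output samples can be mixed before the play position
   reaches 'end' (loop boundary or sample end) at the current increment; the
   +1 lets the position step onto or past that boundary so the loop/stop
   handling at the top of the while runs again on the next pass. */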
1038 if(!done) {
1039 vnf->active = 0;
1040 break;
1043 endpos=vnf->current+done*vnf->increment;
1045 if(vnf->vol) {
1046 #ifndef NATIVE_64BIT_INT
1047 /* use the 32 bit mixers as often as we can (they're much faster) */
1048 if((vnf->current<0x7fffffff)&&(endpos<0x7fffffff)) {
1049 if((md_mode & DMODE_INTERP)) {
1050 if(vc_mode & DMODE_STEREO) {
1051 if((vnf->pan==PAN_SURROUND)&&(md_mode&DMODE_SURROUND))
1052 vnf->current=Mix32SurroundInterp
1053 (s,ptr,vnf->current,vnf->increment,done);
1054 else
1055 vnf->current=Mix32StereoInterp
1056 (s,ptr,vnf->current,vnf->increment,done);
1057 } else
1058 vnf->current=Mix32MonoInterp
1059 (s,ptr,vnf->current,vnf->increment,done);
1060 } else if(vc_mode & DMODE_STEREO) {
1061 if((vnf->pan==PAN_SURROUND)&&(md_mode&DMODE_SURROUND))
1062 vnf->current=Mix32SurroundNormal
1063 (s,ptr,vnf->current,vnf->increment,done);
1064 else
1066 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
1067 if (md_mode & DMODE_SIMDMIXER)
1068 vnf->current=MixSIMDStereoNormal
1069 (s,ptr,vnf->current,vnf->increment,done);
1071 else
1072 #endif
1073 vnf->current=Mix32StereoNormal
1074 (s,ptr,vnf->current,vnf->increment,done);
1076 } else
1077 vnf->current=Mix32MonoNormal
1078 (s,ptr,vnf->current,vnf->increment,done);
1079 } else
1080 #endif
1082 if((md_mode & DMODE_INTERP)) {
1083 if(vc_mode & DMODE_STEREO) {
1084 if((vnf->pan==PAN_SURROUND)&&(md_mode&DMODE_SURROUND))
1085 vnf->current=MixSurroundInterp
1086 (s,ptr,vnf->current,vnf->increment,done);
1087 else
1088 vnf->current=MixStereoInterp
1089 (s,ptr,vnf->current,vnf->increment,done);
1090 } else
1091 vnf->current=MixMonoInterp
1092 (s,ptr,vnf->current,vnf->increment,done);
1093 } else if(vc_mode & DMODE_STEREO) {
1094 if((vnf->pan==PAN_SURROUND)&&(md_mode&DMODE_SURROUND))
1095 vnf->current=MixSurroundNormal
1096 (s,ptr,vnf->current,vnf->increment,done);
1097 else
1099 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
1100 if (md_mode & DMODE_SIMDMIXER)
1101 vnf->current=MixSIMDStereoNormal
1102 (s,ptr,vnf->current,vnf->increment,done);
1104 else
1105 #endif
1106 vnf->current=MixStereoNormal
1107 (s,ptr,vnf->current,vnf->increment,done);
1109 } else
1110 vnf->current=MixMonoNormal
1111 (s,ptr,vnf->current,vnf->increment,done);
1113 } else
1114 /* update sample position */
1115 vnf->current=endpos;
1117 todo-=done;
1118 ptr +=(vc_mode & DMODE_STEREO)?(done<<1):done;
1122 #define _IN_VIRTCH_
1123 #include "virtch_common.c"
1124 #undef _IN_VIRTCH_
1126 void VC1_WriteSamples(SBYTE* buf,ULONG todo)
1128 int left,portion=0,count;
1129 SBYTE *buffer;
1130 int t, pan, vol;
1132 while(todo) {
1133 if(!tickleft) {
1134 if(vc_mode & DMODE_SOFT_MUSIC) md_player();
1135 tickleft=(md_mixfreq*125L)/(md_bpm*50L);
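/* One tempo tick lasts 2.5 / md_bpm seconds, e.g. md_bpm = 125 at
   md_mixfreq = 44100 gives (44100 * 125) / (125 * 50) = 882 samples
   between md_player() calls. */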
1137 left = MIN(tickleft, todo);
1138 buffer = buf;
1139 tickleft -= left;
1140 todo -= left;
1141 buf += samples2bytes(left);
1143 while(left) {
1144 portion = MIN(left, samplesthatfit);
1145 count = (vc_mode & DMODE_STEREO)?(portion<<1):portion;
1146 memset(vc_tickbuf, 0, count<<2);
1147 for(t=0;t<vc_softchn;t++) {
1148 vnf = &vinf[t];
1150 if(vnf->kick) {
1151 vnf->current=((SLONGLONG)vnf->start)<<FRACBITS;
1152 vnf->kick =0;
1153 vnf->active =1;
1156 if(!vnf->frq) vnf->active = 0;
1158 if(vnf->active) {
1159 vnf->increment=((SLONGLONG)(vnf->frq<<FRACBITS))/md_mixfreq;
1160 if(vnf->flags&SF_REVERSE) vnf->increment=-vnf->increment;
1161 vol = vnf->vol; pan = vnf->pan;
1163 vnf->oldlvol=vnf->lvolsel;vnf->oldrvol=vnf->rvolsel;
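/* Panning: pan runs from PAN_LEFT (0) to PAN_RIGHT (255), giving
   lvolsel = vol * (255 - pan) / 256 and rvolsel = vol * pan / 256;
   PAN_SURROUND voices instead get half volume on both channels. */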
1164 if(vc_mode & DMODE_STEREO) {
1165 if(pan != PAN_SURROUND) {
1166 vnf->lvolsel=(vol*(PAN_RIGHT-pan))>>8;
1167 vnf->rvolsel=(vol*pan)>>8;
1168 } else
1169 vnf->lvolsel=vnf->rvolsel=vol/2;
1170 } else
1171 vnf->lvolsel=vol;
1173 idxsize = (vnf->size)? ((SLONGLONG)vnf->size << FRACBITS)-1 : 0;
1174 idxlend = (vnf->repend)? ((SLONGLONG)vnf->repend << FRACBITS)-1 : 0;
1175 idxlpos = (SLONGLONG)vnf->reppos << FRACBITS;
1176 AddChannel(vc_tickbuf, portion);
1180 if(md_mode & DMODE_NOISEREDUCTION) {
1181 MixLowPass(vc_tickbuf, portion);
1184 if(md_reverb) {
1185 if(md_reverb>15) md_reverb=15;
1186 MixReverb(vc_tickbuf, portion);
1189 if (vc_callback) {
1190 vc_callback((unsigned char*)vc_tickbuf, portion);
1194 #if defined HAVE_ALTIVEC || defined HAVE_SSE2
1195 if (md_mode & DMODE_SIMDMIXER)
1197 if(vc_mode & DMODE_FLOAT)
1198 Mix32ToFP_SIMD((float*) buffer, vc_tickbuf, count);
1199 else if(vc_mode & DMODE_16BITS)
1200 Mix32To16_SIMD((SWORD*) buffer, vc_tickbuf, count);
1201 else
1202 Mix32To8_SIMD((SBYTE*) buffer, vc_tickbuf, count);
1204 else
1205 #endif
1207 if(vc_mode & DMODE_FLOAT)
1208 Mix32ToFP((float*) buffer, vc_tickbuf, count);
1209 else if(vc_mode & DMODE_16BITS)
1210 Mix32To16((SWORD*) buffer, vc_tickbuf, count);
1211 else
1212 Mix32To8((SBYTE*) buffer, vc_tickbuf, count);
1214 buffer += samples2bytes(portion);
1215 left -= portion;
1220 int VC1_Init(void)
1222 VC_SetupPointers();
1224 //if (md_mode&DMODE_HQMIXER)
1225 // return VC2_Init();
1227 if(!(Samples=(SWORD**)MikMod_calloc(MAXSAMPLEHANDLES,sizeof(SWORD*)))) {
1228 _mm_errno = MMERR_INITIALIZING_MIXER;
1229 return 1;
1231 if(!vc_tickbuf)
1232 if(!(vc_tickbuf=(SLONG*)MikMod_malloc((TICKLSIZE+32)*sizeof(SLONG)))) {
1233 _mm_errno = MMERR_INITIALIZING_MIXER;
1234 return 1;
1237 MixReverb=(md_mode&DMODE_STEREO)?MixReverb_Stereo:MixReverb_Normal;
1238 MixLowPass=(md_mode&DMODE_STEREO)?MixLowPass_Stereo:MixLowPass_Normal;
1239 vc_mode = md_mode;
1240 return 0;
1243 int VC1_PlayStart(void)
1245 samplesthatfit=TICKLSIZE;
1246 if(vc_mode & DMODE_STEREO) samplesthatfit >>= 1;
1247 tickleft = 0;
1249 RVc1 = (5000L * md_mixfreq) / REVERBERATION;
1250 RVc2 = (5078L * md_mixfreq) / REVERBERATION;
1251 RVc3 = (5313L * md_mixfreq) / REVERBERATION;
1252 RVc4 = (5703L * md_mixfreq) / REVERBERATION;
1253 RVc5 = (6250L * md_mixfreq) / REVERBERATION;
1254 RVc6 = (6953L * md_mixfreq) / REVERBERATION;
1255 RVc7 = (7813L * md_mixfreq) / REVERBERATION;
1256 RVc8 = (8828L * md_mixfreq) / REVERBERATION;
1258 if(!(RVbufL1=(SLONG*)MikMod_calloc((RVc1+1),sizeof(SLONG)))) return 1;
1259 if(!(RVbufL2=(SLONG*)MikMod_calloc((RVc2+1),sizeof(SLONG)))) return 1;
1260 if(!(RVbufL3=(SLONG*)MikMod_calloc((RVc3+1),sizeof(SLONG)))) return 1;
1261 if(!(RVbufL4=(SLONG*)MikMod_calloc((RVc4+1),sizeof(SLONG)))) return 1;
1262 if(!(RVbufL5=(SLONG*)MikMod_calloc((RVc5+1),sizeof(SLONG)))) return 1;
1263 if(!(RVbufL6=(SLONG*)MikMod_calloc((RVc6+1),sizeof(SLONG)))) return 1;
1264 if(!(RVbufL7=(SLONG*)MikMod_calloc((RVc7+1),sizeof(SLONG)))) return 1;
1265 if(!(RVbufL8=(SLONG*)MikMod_calloc((RVc8+1),sizeof(SLONG)))) return 1;
1267 if(!(RVbufR1=(SLONG*)MikMod_calloc((RVc1+1),sizeof(SLONG)))) return 1;
1268 if(!(RVbufR2=(SLONG*)MikMod_calloc((RVc2+1),sizeof(SLONG)))) return 1;
1269 if(!(RVbufR3=(SLONG*)MikMod_calloc((RVc3+1),sizeof(SLONG)))) return 1;
1270 if(!(RVbufR4=(SLONG*)MikMod_calloc((RVc4+1),sizeof(SLONG)))) return 1;
1271 if(!(RVbufR5=(SLONG*)MikMod_calloc((RVc5+1),sizeof(SLONG)))) return 1;
1272 if(!(RVbufR6=(SLONG*)MikMod_calloc((RVc6+1),sizeof(SLONG)))) return 1;
1273 if(!(RVbufR7=(SLONG*)MikMod_calloc((RVc7+1),sizeof(SLONG)))) return 1;
1274 if(!(RVbufR8=(SLONG*)MikMod_calloc((RVc8+1),sizeof(SLONG)))) return 1;
1276 RVRindex = 0;
1277 return 0;
1280 void VC1_PlayStop(void)
1282 if(RVbufL1) MikMod_free(RVbufL1);
1283 if(RVbufL2) MikMod_free(RVbufL2);
1284 if(RVbufL3) MikMod_free(RVbufL3);
1285 if(RVbufL4) MikMod_free(RVbufL4);
1286 if(RVbufL5) MikMod_free(RVbufL5);
1287 if(RVbufL6) MikMod_free(RVbufL6);
1288 if(RVbufL7) MikMod_free(RVbufL7);
1289 if(RVbufL8) MikMod_free(RVbufL8);
1290 RVbufL1=RVbufL2=RVbufL3=RVbufL4=RVbufL5=RVbufL6=RVbufL7=RVbufL8=NULL;
1291 if(RVbufR1) MikMod_free(RVbufR1);
1292 if(RVbufR2) MikMod_free(RVbufR2);
1293 if(RVbufR3) MikMod_free(RVbufR3);
1294 if(RVbufR4) MikMod_free(RVbufR4);
1295 if(RVbufR5) MikMod_free(RVbufR5);
1296 if(RVbufR6) MikMod_free(RVbufR6);
1297 if(RVbufR7) MikMod_free(RVbufR7);
1298 if(RVbufR8) MikMod_free(RVbufR8);
1299 RVbufR1=RVbufR2=RVbufR3=RVbufR4=RVbufR5=RVbufR6=RVbufR7=RVbufR8=NULL;
1302 int VC1_SetNumVoices(void)
1304 int t;
1306 if(!(vc_softchn=md_softchn)) return 0;
1308 if(vinf) MikMod_free(vinf);
1309 if(!(vinf= MikMod_calloc(sizeof(VINFO),vc_softchn))) return 1;
1311 for(t=0;t<vc_softchn;t++) {
1312 vinf[t].frq=10000;
1313 vinf[t].pan=(t&1)?PAN_LEFT:PAN_RIGHT;
1316 return 0;
1319 /* ex:set ts=4: */