1 |
|
2 |
|
3 |
|
4 |
|
5 |
|
6 |
|
7 |
|
8 |
|
9 |
|
10 |
|
11 |
|
12 |
|
13 |
|
14 |
|
15 |
|
16 |
|
17 |
|
18 |
|
19 |
|
20 |
|
21 |
|
22 |
|
/**
 * FIFO queue of interleaved stereo (2 channels) Float32 samples.
 *
 * Frames are counted in stereo pairs: one frame = two floats. The consumed
 * region is tracked with `_position`; `rewind()` compacts the live region
 * back to the start of the backing vector when possible.
 */
class FifoSampleBuffer {
  constructor() {
    this._vector = new Float32Array();
    this._position = 0; // first unconsumed frame
    this._frameCount = 0; // number of unconsumed frames
  }
  get vector() {
    return this._vector;
  }
  get position() {
    return this._position;
  }
  // Index (in floats) of the first unconsumed sample.
  get startIndex() {
    return this._position * 2;
  }
  get frameCount() {
    return this._frameCount;
  }
  // Index (in floats) one past the last unconsumed sample.
  get endIndex() {
    return (this._position + this._frameCount) * 2;
  }
  // Drop all pending frames and reset the read position.
  clear() {
    this.receive(this._frameCount);
    this.rewind();
  }
  /**
   * Mark `numFrames` frames, already written directly into `vector` at
   * `endIndex`, as available for reading.
   */
  put(numFrames) {
    this._frameCount += numFrames;
  }
  /**
   * Append frames from `samples` (an interleaved Float32Array).
   * @param samples interleaved stereo source data
   * @param position frame offset into `samples` to copy from
   * @param numFrames frames to copy; a non-numeric value (NaN) means
   *   "everything from `position` to the end of `samples`"
   */
  putSamples(samples, position, numFrames = 0) {
    position = position || 0;
    const sourceOffset = position * 2;
    // `!(x >= 0)` deliberately catches NaN/undefined, not just negatives.
    if (!(numFrames >= 0)) {
      numFrames = (samples.length - sourceOffset) / 2;
    }
    const numSamples = numFrames * 2;
    this.ensureCapacity(numFrames + this._frameCount);
    const destOffset = this.endIndex;
    this.vector.set(samples.subarray(sourceOffset, sourceOffset + numSamples), destOffset);
    this._frameCount += numFrames;
  }
  /**
   * Append frames from another FifoSampleBuffer.
   * @param buffer source buffer
   * @param position frame offset into the source's unconsumed region
   * @param numFrames frames to copy; NaN means "all remaining"
   */
  putBuffer(buffer, position, numFrames = 0) {
    position = position || 0;
    if (!(numFrames >= 0)) {
      numFrames = buffer.frameCount - position;
    }
    this.putSamples(buffer.vector, buffer.position + position, numFrames);
  }
  /**
   * Consume (discard) `numFrames` frames from the front of the queue.
   * Called with no argument (or NaN / a too-large count), consumes all.
   */
  receive(numFrames) {
    if (!(numFrames >= 0) || numFrames > this._frameCount) {
      numFrames = this.frameCount;
    }
    this._frameCount -= numFrames;
    this._position += numFrames;
  }
  /**
   * Copy `numFrames` frames from the front of the queue into `output`
   * and consume them.
   */
  receiveSamples(output, numFrames = 0) {
    const numSamples = numFrames * 2;
    const sourceOffset = this.startIndex;
    output.set(this._vector.subarray(sourceOffset, sourceOffset + numSamples));
    this.receive(numFrames);
  }
  /**
   * Copy `numFrames` frames starting at frame offset `position` into
   * `output` WITHOUT consuming them.
   */
  extract(output, position = 0, numFrames = 0) {
    const sourceOffset = this.startIndex + position * 2;
    const numSamples = numFrames * 2;
    output.set(this._vector.subarray(sourceOffset, sourceOffset + numSamples));
  }
  /**
   * Grow the backing vector so it can hold `numFrames` frames in total,
   * compacting the unconsumed region to the start. `numFrames` may be
   * fractional (callers pass computed estimates); it is truncated.
   */
  ensureCapacity(numFrames = 0) {
    // Math.trunc replaces the old parseInt(numFrames * 2): parseInt
    // string-round-trips the number (and mis-parses exponential notation
    // such as 1e21); truncation semantics are otherwise identical.
    const minLength = Math.trunc(numFrames * 2);
    if (this._vector.length < minLength) {
      const newVector = new Float32Array(minLength);
      newVector.set(this._vector.subarray(this.startIndex, this.endIndex));
      this._vector = newVector;
      this._position = 0;
    } else {
      this.rewind();
    }
  }
  // Ensure room for `numFrames` MORE frames beyond those already queued.
  ensureAdditionalCapacity(numFrames = 0) {
    this.ensureCapacity(this._frameCount + numFrames);
  }
  // Move the unconsumed region back to the start of the backing vector.
  rewind() {
    if (this._position > 0) {
      this._vector.set(this._vector.subarray(this.startIndex, this.endIndex));
      this._position = 0;
    }
  }
}
|
109 |
|
/**
 * Base class for processing stages that read from an input FIFO and write
 * to an output FIFO. Stages can either own their buffers or have them
 * injected later via the setters (as SoundTouch does when chaining stages).
 */
class AbstractFifoSamplePipe {
  /**
   * @param createBuffers when truthy, allocate fresh input/output FIFOs;
   *   otherwise both buffers start as null and must be assigned by the caller.
   */
  constructor(createBuffers) {
    if (createBuffers) {
      this._inputBuffer = new FifoSampleBuffer();
      this._outputBuffer = new FifoSampleBuffer();
      return;
    }
    this._inputBuffer = null;
    this._outputBuffer = null;
  }
  get inputBuffer() {
    return this._inputBuffer;
  }
  set inputBuffer(inputBuffer) {
    this._inputBuffer = inputBuffer;
  }
  get outputBuffer() {
    return this._outputBuffer;
  }
  set outputBuffer(outputBuffer) {
    this._outputBuffer = outputBuffer;
  }
  // Flush both ends of the pipe.
  clear() {
    this._inputBuffer.clear();
    this._outputBuffer.clear();
  }
}
|
136 |
|
/**
 * Resamples interleaved stereo audio by linear interpolation to change the
 * playback rate (and therefore pitch). A rate > 1 consumes input faster
 * than it produces output.
 *
 * Left byte-identical apart from comments: the interpolation loop carries
 * fractional phase state (`slopeCount`) across calls and its exact
 * statement order is load-bearing.
 */
class RateTransposer extends AbstractFifoSamplePipe {
  constructor(createBuffers) {
    super(createBuffers);
    this.reset();
    this._rate = 1;
  }
  // Rate factor: output duration = input duration / rate.
  set rate(rate) {
    this._rate = rate;
  }
  reset() {
    // Fractional interpolation phase, plus the last L/R sample of the
    // previous batch so consecutive batches join without a discontinuity.
    this.slopeCount = 0;
    this.prevSampleL = 0;
    this.prevSampleR = 0;
  }
  // Fresh transposer with the same rate; buffers are NOT copied
  // (super(undefined) leaves them null).
  clone() {
    const result = new RateTransposer();
    result.rate = this._rate;
    return result;
  }
  process() {
    const numFrames = this._inputBuffer.frameCount;
    // Worst-case output size for this batch: input frames scaled by 1/rate.
    this._outputBuffer.ensureAdditionalCapacity(numFrames / this._rate + 1);
    const numFramesOutput = this.transpose(numFrames);
    // receive() with no argument drains ALL input frames (see
    // FifoSampleBuffer.receive); transpose() has already read them.
    this._inputBuffer.receive();
    this._outputBuffer.put(numFramesOutput);
  }
  /**
   * Linearly interpolate `numFrames` input frames into the output buffer.
   * @returns number of output frames written.
   */
  transpose(numFrames = 0) {
    if (numFrames === 0) {
      return 0;
    }
    const src = this._inputBuffer.vector;
    const srcOffset = this._inputBuffer.startIndex;
    const dest = this._outputBuffer.vector;
    const destOffset = this._outputBuffer.endIndex;
    let used = 0;
    let i = 0;
    // Phase < 1: interpolate between the carried-over previous frame and
    // the first frame of this batch.
    while (this.slopeCount < 1.0) {
      dest[destOffset + 2 * i] = (1.0 - this.slopeCount) * this.prevSampleL + this.slopeCount * src[srcOffset];
      dest[destOffset + 2 * i + 1] = (1.0 - this.slopeCount) * this.prevSampleR + this.slopeCount * src[srcOffset + 1];
      i = i + 1;
      this.slopeCount += this._rate;
    }
    this.slopeCount -= 1.0;
    if (numFrames !== 1) {
      // Walk through the batch: advance `used` whole input frames whenever
      // the phase exceeds 1, then interpolate between frames `used` and
      // `used + 1`. The labelled break exits when input is exhausted.
      out: while (true) {
        while (this.slopeCount > 1.0) {
          this.slopeCount -= 1.0;
          used = used + 1;
          if (used >= numFrames - 1) {
            break out;
          }
        }
        const srcIndex = srcOffset + 2 * used;
        dest[destOffset + 2 * i] = (1.0 - this.slopeCount) * src[srcIndex] + this.slopeCount * src[srcIndex + 2];
        dest[destOffset + 2 * i + 1] = (1.0 - this.slopeCount) * src[srcIndex + 1] + this.slopeCount * src[srcIndex + 3];
        i = i + 1;
        this.slopeCount += this._rate;
      }
    }
    // Remember the last input frame for the next batch's leading segment.
    this.prevSampleL = src[srcOffset + 2 * numFrames - 2];
    this.prevSampleR = src[srcOffset + 2 * numFrames - 1];
    return i;
  }
}
|
201 |
|
/**
 * Thin wrapper around a processing pipe that pumps it until enough output
 * is available. Subclasses must override fillInputBuffer() to feed the
 * pipe from their particular source.
 */
class FilterSupport {
  constructor(pipe) {
    this._pipe = pipe;
  }
  get pipe() {
    return this._pipe;
  }
  get inputBuffer() {
    return this._pipe.inputBuffer;
  }
  get outputBuffer() {
    return this._pipe.outputBuffer;
  }
  // Abstract: subclasses supply samples to the pipe's input buffer.
  fillInputBuffer() {
    throw new Error('fillInputBuffer() not overridden');
  }
  /**
   * Run the pipe until the output buffer holds at least `numFrames` frames,
   * or until the source can no longer fill a whole input batch.
   */
  fillOutputBuffer(numFrames = 0) {
    while (this.outputBuffer.frameCount < numFrames) {
      // Top the input buffer up to the fixed batch size before processing.
      const framesNeeded = 8192 * 2 - this.inputBuffer.frameCount;
      this.fillInputBuffer(framesNeeded);
      if (this.inputBuffer.frameCount < 8192 * 2) {
        // Source exhausted: a full batch could not be assembled.
        break;
      }
      this._pipe.process();
    }
  }
  clear() {
    this._pipe.clear();
  }
}
|
232 |
|
// Shared no-op, used as the default for optional callbacks.
const noop = () => undefined;
|
236 |
|
/**
 * Concrete FilterSupport that pulls samples from a `sourceSound` (anything
 * exposing extract(target, numFrames, position), e.g. WebAudioBufferSource),
 * pushes them through the pipe, and serves processed frames on demand.
 *
 * Keeps up to `historyBufferSize` already-played frames in the output
 * buffer so `position` can be rewound without re-reading the source.
 * Left byte-identical apart from comments: the position/history
 * bookkeeping in extract() is order-dependent.
 */
class SimpleFilter extends FilterSupport {
  /**
   * @param sourceSound sample source
   * @param pipe processing pipe (e.g. a SoundTouch instance)
   * @param callback invoked by onEnd() when the source is exhausted
   */
  constructor(sourceSound, pipe, callback = noop) {
    super(pipe);
    this.callback = callback;
    this.sourceSound = sourceSound;
    // Played frames retained for rewinding (22050 = 0.5 s at 44.1 kHz —
    // presumably; the sample rate is not visible here).
    this.historyBufferSize = 22050;
    this._sourcePosition = 0; // next frame to read from sourceSound
    this.outputBufferPosition = 0; // read cursor inside the output FIFO
    this._position = 0; // total frames handed to the consumer
  }
  get position() {
    return this._position;
  }
  // Rewind-only setter: moving forward or beyond the retained history throws.
  set position(position) {
    if (position > this._position) {
      throw new RangeError('New position may not be greater than current position');
    }
    const newOutputBufferPosition = this.outputBufferPosition - (this._position - position);
    if (newOutputBufferPosition < 0) {
      throw new RangeError('New position falls outside of history buffer');
    }
    this.outputBufferPosition = newOutputBufferPosition;
    this._position = position;
  }
  get sourcePosition() {
    return this._sourcePosition;
  }
  // Seeking the source discards all buffered/processed state.
  set sourcePosition(sourcePosition) {
    this.clear();
    this._sourcePosition = sourcePosition;
  }
  onEnd() {
    this.callback();
  }
  /**
   * Pull up to `numFrames` frames from the source into the pipe's input
   * buffer, advancing the source position by what was actually read.
   */
  fillInputBuffer(numFrames = 0) {
    const samples = new Float32Array(numFrames * 2);
    const numFramesExtracted = this.sourceSound.extract(samples, numFrames, this._sourcePosition);
    this._sourcePosition += numFramesExtracted;
    this.inputBuffer.putSamples(samples, 0, numFramesExtracted);
  }
  /**
   * Copy up to `numFrames` processed frames into `target`.
   * @returns number of frames actually delivered (0 signals end of source).
   */
  extract(target, numFrames = 0) {
    this.fillOutputBuffer(this.outputBufferPosition + numFrames);
    const numFramesExtracted = Math.min(numFrames, this.outputBuffer.frameCount - this.outputBufferPosition);
    this.outputBuffer.extract(target, this.outputBufferPosition, numFramesExtracted);
    const currentFrames = this.outputBufferPosition + numFramesExtracted;
    // Cap the retained history: keep at most historyBufferSize played
    // frames in the FIFO, consuming any excess.
    this.outputBufferPosition = Math.min(this.historyBufferSize, currentFrames);
    this.outputBuffer.receive(Math.max(currentFrames - this.historyBufferSize, 0));
    this._position += numFramesExtracted;
    return numFramesExtracted;
  }
  // Legacy sample-data event hook; expects the target array in event.data.
  handleSampleData(event) {
    this.extract(event.data, 4096);
  }
  clear() {
    super.clear();
    this.outputBufferPosition = 0;
  }
}
|
295 |
|
// Sentinel (0): derive the sequence length automatically from the tempo.
const USE_AUTO_SEQUENCE_LEN = 0;
const DEFAULT_SEQUENCE_MS = USE_AUTO_SEQUENCE_LEN;
// Sentinel (0): derive the seek window length automatically from the tempo.
const USE_AUTO_SEEKWINDOW_LEN = 0;
const DEFAULT_SEEKWINDOW_MS = USE_AUTO_SEEKWINDOW_LEN;
// Default overlap between consecutive sequences, in milliseconds.
const DEFAULT_OVERLAP_MS = 8;
// Offset tables for the hierarchical "quick seek" correlation scan, one row
// per pass from coarse to fine; a 0 entry terminates each row (the scan loop
// tests the value for truthiness).
const _SCAN_OFFSETS = [[124, 186, 248, 310, 372, 434, 496, 558, 620, 682, 744, 806, 868, 930, 992, 1054, 1116, 1178, 1240, 1302, 1364, 1426, 1488, 0], [-100, -75, -50, -25, 25, 50, 75, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [-20, -15, -10, -5, 5, 10, 15, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [-4, -3, -2, -1, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]];
// Tempo range over which the automatic sequence/seek-window lengths are
// linearly interpolated; outside this range they are clamped.
const AUTOSEQ_TEMPO_LOW = 0.5;
const AUTOSEQ_TEMPO_TOP = 2.0;
// Automatic sequence length (ms) at the low/high ends of the tempo range.
const AUTOSEQ_AT_MIN = 125.0;
const AUTOSEQ_AT_MAX = 50.0;
// Slope and intercept of the sequence-length line: ms = K * tempo + C.
const AUTOSEQ_K = (AUTOSEQ_AT_MAX - AUTOSEQ_AT_MIN) / (AUTOSEQ_TEMPO_TOP - AUTOSEQ_TEMPO_LOW);
const AUTOSEQ_C = AUTOSEQ_AT_MIN - AUTOSEQ_K * AUTOSEQ_TEMPO_LOW;
// Automatic seek-window length (ms) at the low/high ends of the tempo range.
const AUTOSEEK_AT_MIN = 25.0;
const AUTOSEEK_AT_MAX = 15.0;
// Slope and intercept of the seek-window line: ms = K * tempo + C.
const AUTOSEEK_K = (AUTOSEEK_AT_MAX - AUTOSEEK_AT_MIN) / (AUTOSEQ_TEMPO_TOP - AUTOSEQ_TEMPO_LOW);
const AUTOSEEK_C = AUTOSEEK_AT_MIN - AUTOSEEK_K * AUTOSEQ_TEMPO_LOW;
|
/**
 * Time-stretcher: changes tempo without changing pitch using an
 * overlap-add scheme. Each iteration picks the best-correlating offset in
 * a seek window, cross-fades ("overlaps") the stored mid-buffer with the
 * input at that offset, copies the non-overlapping remainder, and skips
 * ahead by a tempo-scaled amount.
 *
 * Left byte-identical apart from comments: the per-iteration state
 * (midBuffer, skipFract) and exact statement order are load-bearing.
 */
class Stretch extends AbstractFifoSamplePipe {
  constructor(createBuffers) {
    super(createBuffers);
    this._quickSeek = true;
    this.midBufferDirty = false;
    // Tail of the previous output sequence, cross-faded with the next one.
    this.midBuffer = null;
    this.overlapLength = 0;
    this.autoSeqSetting = true;
    this.autoSeekSetting = true;
    this._tempo = 1;
    this.setParameters(44100, DEFAULT_SEQUENCE_MS, DEFAULT_SEEKWINDOW_MS, DEFAULT_OVERLAP_MS);
  }
  clear() {
    super.clear();
    this.clearMidBuffer();
  }
  clearMidBuffer() {
    if (this.midBufferDirty) {
      this.midBufferDirty = false;
      this.midBuffer = null;
    }
  }
  /**
   * Configure the stretcher. Passing 0 (the USE_AUTO_* sentinels) for
   * sequenceMs/seekWindowMs re-enables automatic derivation from tempo;
   * non-positive sampleRate/overlapMs leave the previous values in place.
   */
  setParameters(sampleRate, sequenceMs, seekWindowMs, overlapMs) {
    if (sampleRate > 0) {
      this.sampleRate = sampleRate;
    }
    if (overlapMs > 0) {
      this.overlapMs = overlapMs;
    }
    if (sequenceMs > 0) {
      this.sequenceMs = sequenceMs;
      this.autoSeqSetting = false;
    } else {
      this.autoSeqSetting = true;
    }
    if (seekWindowMs > 0) {
      this.seekWindowMs = seekWindowMs;
      this.autoSeekSetting = false;
    } else {
      this.autoSeekSetting = true;
    }
    this.calculateSequenceParameters();
    this.calculateOverlapLength(this.overlapMs);
    // Re-apply tempo so nominalSkip/sampleReq pick up the new lengths.
    this.tempo = this._tempo;
  }
  // Setting tempo recomputes the per-iteration skip and the number of
  // input frames required before process() can run one iteration.
  set tempo(newTempo) {
    let intskip;
    this._tempo = newTempo;
    this.calculateSequenceParameters();
    this.nominalSkip = this._tempo * (this.seekWindowLength - this.overlapLength);
    this.skipFract = 0;
    intskip = Math.floor(this.nominalSkip + 0.5);
    this.sampleReq = Math.max(intskip + this.overlapLength, this.seekWindowLength) + this.seekLength;
  }
  get tempo() {
    return this._tempo;
  }
  get inputChunkSize() {
    return this.sampleReq;
  }
  get outputChunkSize() {
    return this.overlapLength + Math.max(0, this.seekWindowLength - 2 * this.overlapLength);
  }
  /**
   * Convert the overlap time to frames: at least 16, rounded down to a
   * multiple of 8, and (re)allocate the two overlap work buffers.
   */
  calculateOverlapLength(overlapInMsec = 0) {
    let newOvl;
    newOvl = this.sampleRate * overlapInMsec / 1000;
    newOvl = newOvl < 16 ? 16 : newOvl;
    newOvl -= newOvl % 8;
    this.overlapLength = newOvl;
    this.refMidBuffer = new Float32Array(this.overlapLength * 2);
    this.midBuffer = new Float32Array(this.overlapLength * 2);
  }
  // Clamp x to [mi, ma].
  checkLimits(x, mi, ma) {
    return x < mi ? mi : x > ma ? ma : x;
  }
  /**
   * Derive seekWindowLength/seekLength (in frames) from the millisecond
   * settings, interpolating them from tempo when in automatic mode.
   * Note AUTOSEQ_AT_MAX < AUTOSEQ_AT_MIN, so MAX is the lower clamp bound.
   */
  calculateSequenceParameters() {
    let seq;
    let seek;
    if (this.autoSeqSetting) {
      seq = AUTOSEQ_C + AUTOSEQ_K * this._tempo;
      seq = this.checkLimits(seq, AUTOSEQ_AT_MAX, AUTOSEQ_AT_MIN);
      this.sequenceMs = Math.floor(seq + 0.5);
    }
    if (this.autoSeekSetting) {
      seek = AUTOSEEK_C + AUTOSEEK_K * this._tempo;
      seek = this.checkLimits(seek, AUTOSEEK_AT_MAX, AUTOSEEK_AT_MIN);
      this.seekWindowMs = Math.floor(seek + 0.5);
    }
    this.seekWindowLength = Math.floor(this.sampleRate * this.sequenceMs / 1000);
    this.seekLength = Math.floor(this.sampleRate * this.seekWindowMs / 1000);
  }
  // Toggle between the exhaustive and the hierarchical (quick) seek.
  set quickSeek(enable) {
    this._quickSeek = enable;
  }
  clone() {
    const result = new Stretch();
    result.tempo = this._tempo;
    result.setParameters(this.sampleRate, this.sequenceMs, this.seekWindowMs, this.overlapMs);
    return result;
  }
  // Frame offset within the seek window that best matches the mid-buffer.
  seekBestOverlapPosition() {
    return this._quickSeek ? this.seekBestOverlapPositionStereoQuick() : this.seekBestOverlapPositionStereo();
  }
  // Exhaustive scan over every offset in the seek window.
  seekBestOverlapPositionStereo() {
    let bestOffset;
    let bestCorrelation;
    let correlation;
    let i = 0;
    this.preCalculateCorrelationReferenceStereo();
    bestOffset = 0;
    bestCorrelation = Number.MIN_VALUE;
    for (; i < this.seekLength; i = i + 1) {
      correlation = this.calculateCrossCorrelationStereo(2 * i, this.refMidBuffer);
      if (correlation > bestCorrelation) {
        bestCorrelation = correlation;
        bestOffset = i;
      }
    }
    return bestOffset;
  }
  // Coarse-to-fine scan using the _SCAN_OFFSETS tables: each pass refines
  // around the best offset found by the previous pass.
  seekBestOverlapPositionStereoQuick() {
    let bestOffset;
    let bestCorrelation;
    let correlation;
    let scanCount = 0;
    let correlationOffset;
    let tempOffset;
    this.preCalculateCorrelationReferenceStereo();
    bestCorrelation = Number.MIN_VALUE;
    bestOffset = 0;
    correlationOffset = 0;
    tempOffset = 0;
    for (; scanCount < 4; scanCount = scanCount + 1) {
      let j = 0;
      // A 0 table entry (falsy) ends the row.
      while (_SCAN_OFFSETS[scanCount][j]) {
        tempOffset = correlationOffset + _SCAN_OFFSETS[scanCount][j];
        if (tempOffset >= this.seekLength) {
          break;
        }
        correlation = this.calculateCrossCorrelationStereo(2 * tempOffset, this.refMidBuffer);
        if (correlation > bestCorrelation) {
          bestCorrelation = correlation;
          bestOffset = tempOffset;
        }
        j = j + 1;
      }
      correlationOffset = bestOffset;
    }
    return bestOffset;
  }
  // Weight the mid-buffer by a triangular window, i * (overlapLength - i),
  // emphasising its centre for the correlation comparisons.
  preCalculateCorrelationReferenceStereo() {
    let i = 0;
    let context;
    let temp;
    for (; i < this.overlapLength; i = i + 1) {
      temp = i * (this.overlapLength - i);
      context = i * 2;
      this.refMidBuffer[context] = this.midBuffer[context] * temp;
      this.refMidBuffer[context + 1] = this.midBuffer[context + 1] * temp;
    }
  }
  /**
   * Unnormalised cross-correlation between `compare` and the input at
   * `mixingPosition` (a float index relative to the unconsumed input),
   * summed over both channels of the overlap region.
   */
  calculateCrossCorrelationStereo(mixingPosition, compare) {
    const mixing = this._inputBuffer.vector;
    mixingPosition += this._inputBuffer.startIndex;
    let correlation = 0;
    let i = 2;
    const calcLength = 2 * this.overlapLength;
    let mixingOffset;
    for (; i < calcLength; i = i + 2) {
      mixingOffset = i + mixingPosition;
      correlation += mixing[mixingOffset] * compare[i] + mixing[mixingOffset + 1] * compare[i + 1];
    }
    return correlation;
  }
  overlap(overlapPosition) {
    this.overlapStereo(2 * overlapPosition);
  }
  /**
   * Linear cross-fade over the overlap region: input fades in while the
   * stored mid-buffer fades out, written to the end of the output buffer.
   */
  overlapStereo(inputPosition) {
    const input = this._inputBuffer.vector;
    inputPosition += this._inputBuffer.startIndex;
    const output = this._outputBuffer.vector;
    const outputPosition = this._outputBuffer.endIndex;
    let i = 0;
    let context;
    let tempFrame;
    const frameScale = 1 / this.overlapLength;
    let fi;
    let inputOffset;
    let outputOffset;
    for (; i < this.overlapLength; i = i + 1) {
      tempFrame = (this.overlapLength - i) * frameScale; // fade-out weight
      fi = i * frameScale; // fade-in weight
      context = 2 * i;
      inputOffset = context + inputPosition;
      outputOffset = context + outputPosition;
      output[outputOffset + 0] = input[inputOffset + 0] * fi + this.midBuffer[context + 0] * tempFrame;
      output[outputOffset + 1] = input[inputOffset + 1] * fi + this.midBuffer[context + 1] * tempFrame;
    }
  }
  /**
   * Run as many stretch iterations as the buffered input allows. Each
   * iteration: seek best overlap offset, cross-fade, copy the sequence
   * remainder, save the new mid-buffer tail, and consume a tempo-scaled
   * number of input frames (carrying the fractional part in skipFract).
   */
  process() {
    let offset;
    let temp;
    let overlapSkip;
    if (this.midBuffer === null) {
      // First call: need one overlap's worth of input to seed the mid-buffer.
      if (this._inputBuffer.frameCount < this.overlapLength) {
        return;
      }
      this.midBuffer = new Float32Array(this.overlapLength * 2);
      this._inputBuffer.receiveSamples(this.midBuffer, this.overlapLength);
    }
    while (this._inputBuffer.frameCount >= this.sampleReq) {
      offset = this.seekBestOverlapPosition();
      this._outputBuffer.ensureAdditionalCapacity(this.overlapLength);
      this.overlap(Math.floor(offset));
      this._outputBuffer.put(this.overlapLength);
      // Copy the part of the sequence that lies beyond the overlap.
      temp = this.seekWindowLength - 2 * this.overlapLength;
      if (temp > 0) {
        this._outputBuffer.putBuffer(this._inputBuffer, offset + this.overlapLength, temp);
      }
      // Stash the tail of this sequence as the next iteration's fade-out.
      const start = this._inputBuffer.startIndex + 2 * (offset + this.seekWindowLength - this.overlapLength);
      this.midBuffer.set(this._inputBuffer.vector.subarray(start, start + 2 * this.overlapLength));
      // Advance by nominalSkip, accumulating the fractional remainder.
      this.skipFract += this.nominalSkip;
      overlapSkip = Math.floor(this.skipFract);
      this.skipFract -= overlapSkip;
      this._inputBuffer.receive(overlapSkip);
    }
  }
}
|
540 |
|
// Despite the name, this returns TRUE when a and b DIFFER by more than
// 1e-10 (callers use it as a "has the value changed?" test). The name is
// kept for compatibility with existing callers.
const testFloatEqual = function (a, b) {
  return Math.abs(a - b) > 1e-10;
};
|
544 |
|
/**
 * Two-stage pipeline combining tempo stretching (Stretch) and rate
 * transposition (RateTransposer). Effective rate and tempo are derived
 * from the virtual pitch/rate/tempo values; the stage order depends on
 * whether the effective rate is above or below 1.
 */
class SoundTouch {
  constructor() {
    this.transposer = new RateTransposer(false);
    this.stretch = new Stretch(false);
    this._inputBuffer = new FifoSampleBuffer();
    this._intermediateBuffer = new FifoSampleBuffer();
    this._outputBuffer = new FifoSampleBuffer();
    // Start at 0 so the first recalculation below registers a change and
    // configures both stages.
    this._rate = 0;
    this._tempo = 0;
    this.virtualPitch = 1.0;
    this.virtualRate = 1.0;
    this.virtualTempo = 1.0;
    this.calculateEffectiveRateAndTempo();
  }
  clear() {
    this.transposer.clear();
    this.stretch.clear();
  }
  clone() {
    const result = new SoundTouch();
    result.rate = this.rate;
    result.tempo = this.tempo;
    return result;
  }
  get rate() {
    return this._rate;
  }
  set rate(rate) {
    this.virtualRate = rate;
    this.calculateEffectiveRateAndTempo();
  }
  // Rate change in percent (e.g. +5 -> rate 1.05).
  set rateChange(rateChange) {
    // Fix: go through the rate setter so the effective rate is recalculated
    // and the stage wiring updated; the previous code assigned this._rate
    // directly, leaving the transposer stale (tempoChange already routed
    // through its setter).
    this.rate = 1.0 + 0.01 * rateChange;
  }
  get tempo() {
    return this._tempo;
  }
  set tempo(tempo) {
    this.virtualTempo = tempo;
    this.calculateEffectiveRateAndTempo();
  }
  // Tempo change in percent (e.g. -10 -> tempo 0.9).
  set tempoChange(tempoChange) {
    this.tempo = 1.0 + 0.01 * tempoChange;
  }
  set pitch(pitch) {
    this.virtualPitch = pitch;
    this.calculateEffectiveRateAndTempo();
  }
  // Pitch shift in octaves: ratio = e^(ln 2 * octaves). The pitch setter
  // already triggers the recalculation, so it is not repeated here.
  set pitchOctaves(pitchOctaves) {
    this.pitch = Math.exp(0.69314718056 * pitchOctaves);
  }
  set pitchSemitones(pitchSemitones) {
    this.pitchOctaves = pitchSemitones / 12.0;
  }
  get inputBuffer() {
    return this._inputBuffer;
  }
  get outputBuffer() {
    return this._outputBuffer;
  }
  /**
   * Recompute the effective rate/tempo from the virtual values, push them
   * to the stages when they changed, and (re)wire the buffer chain so the
   * cheaper stage ordering is used.
   */
  calculateEffectiveRateAndTempo() {
    const previousTempo = this._tempo;
    const previousRate = this._rate;
    this._tempo = this.virtualTempo / this.virtualPitch;
    this._rate = this.virtualRate * this.virtualPitch;
    // testFloatEqual returns true when the values DIFFER beyond 1e-10.
    if (testFloatEqual(this._tempo, previousTempo)) {
      this.stretch.tempo = this._tempo;
    }
    if (testFloatEqual(this._rate, previousRate)) {
      this.transposer.rate = this._rate;
    }
    if (this._rate > 1.0) {
      // Speeding up: stretch first, then transpose.
      if (this._outputBuffer !== this.transposer.outputBuffer) {
        this.stretch.inputBuffer = this._inputBuffer;
        this.stretch.outputBuffer = this._intermediateBuffer;
        this.transposer.inputBuffer = this._intermediateBuffer;
        this.transposer.outputBuffer = this._outputBuffer;
      }
    } else {
      // Slowing down (or unity): transpose first, then stretch.
      if (this._outputBuffer !== this.stretch.outputBuffer) {
        this.transposer.inputBuffer = this._inputBuffer;
        this.transposer.outputBuffer = this._intermediateBuffer;
        this.stretch.inputBuffer = this._intermediateBuffer;
        this.stretch.outputBuffer = this._outputBuffer;
      }
    }
  }
  process() {
    if (this._rate > 1.0) {
      this.stretch.process();
      this.transposer.process();
    } else {
      this.transposer.process();
      this.stretch.process();
    }
  }
}
|
643 |
|
/**
 * Adapts a Web Audio AudioBuffer to the extract() interface expected by
 * SimpleFilter. Mono buffers are duplicated onto both output channels.
 */
class WebAudioBufferSource {
  constructor(buffer) {
    this.buffer = buffer;
    this._position = 0;
  }
  get dualChannel() {
    return this.buffer.numberOfChannels > 1;
  }
  get position() {
    return this._position;
  }
  set position(value) {
    this._position = value;
  }
  /**
   * Copy up to `numFrames` frames starting at `position` into `target`
   * as interleaved stereo.
   * @returns number of frames actually copied (0 at/after end of buffer).
   */
  extract(target, numFrames = 0, position = 0) {
    this.position = position;
    const left = this.buffer.getChannelData(0);
    const right = this.dualChannel ? this.buffer.getChannelData(1) : left;
    // Fix: clamp to the frames that actually exist. The previous version
    // always looped numFrames times (writing NaN past the end of the
    // channel data into target) and could return a NEGATIVE count when
    // position exceeded the buffer length.
    const framesToRead = Math.max(0, Math.min(numFrames, left.length - position));
    for (let i = 0; i < framesToRead; i++) {
      target[i * 2] = left[i + position];
      target[i * 2 + 1] = right[i + position];
    }
    return framesToRead;
  }
}
|
670 |
|
/**
 * Build a ScriptProcessorNode whose audio callback pulls processed frames
 * from `filter` and copies them into the node's stereo output.
 *
 * @param context Web Audio context (must provide createScriptProcessor)
 * @param filter SimpleFilter-like object (extract / sourcePosition / onEnd)
 * @param sourcePositionCallback invoked each tick with the source position
 * @param bufferSize frames per processing tick
 */
const getWebAudioNode = function (context, filter, sourcePositionCallback = noop, bufferSize = 4096) {
  const node = context.createScriptProcessor(bufferSize, 2, 2);
  // Scratch buffer reused across ticks: bufferSize interleaved stereo frames.
  const samples = new Float32Array(bufferSize * 2);
  node.onaudioprocess = (event) => {
    const leftChannel = event.outputBuffer.getChannelData(0);
    const rightChannel = event.outputBuffer.getChannelData(1);
    const frameCount = filter.extract(samples, bufferSize);
    sourcePositionCallback(filter.sourcePosition);
    if (frameCount === 0) {
      // Source exhausted: notify the filter's end-of-stream callback.
      filter.onEnd();
    }
    // De-interleave the extracted frames into the two output channels.
    for (let frame = 0; frame < frameCount; frame++) {
      leftChannel[frame] = samples[frame * 2];
      rightChannel[frame] = samples[frame * 2 + 1];
    }
  };
  return node;
};
|
690 |
|
/**
 * Left-pad `n` (stringified) with `z` (default '0') until it is at least
 * `width` characters long.
 */
const pad = function (n, width, z) {
  const fill = z || '0';
  const text = `${n}`;
  if (text.length >= width) {
    return text;
  }
  return fill.repeat(width - text.length) + text;
};
|
/**
 * Format a duration in seconds as "M:SS" (seconds zero-padded to 2 digits).
 */
const minsSecs = function (secs) {
  const mins = Math.floor(secs / 60);
  const seconds = secs - mins * 60;
  // Math.trunc + padStart replace the old parseInt(seconds) + pad() pair:
  // parseInt was string-round-tripping a number (and had no radix), and
  // padStart removes the dependency on the hand-rolled pad helper.
  return `${mins}:${String(Math.trunc(seconds)).padStart(2, '0')}`;
};
|
701 |
|
702 | const onUpdate = function (sourcePosition) {
|
703 | const currentTimePlayed = this.timePlayed;
|
704 | const sampleRate = this.sampleRate;
|
705 | this.sourcePosition = sourcePosition;
|
706 | this.timePlayed = sourcePosition / sampleRate;
|
707 | if (currentTimePlayed !== this.timePlayed) {
|
708 | const timePlayed = new CustomEvent('play', {
|
709 | detail: {
|
710 | timePlayed: this.timePlayed,
|
711 | formattedTimePlayed: this.formattedTimePlayed,
|
712 | percentagePlayed: this.percentagePlayed
|
713 | }
|
714 | });
|
715 | this._node.dispatchEvent(timePlayed);
|
716 | }
|
717 | };
|
/**
 * High-level wrapper that plays an AudioBuffer through a SoundTouch
 * pipeline via a ScriptProcessorNode, exposing tempo/rate/pitch controls
 * and 'play' progress events.
 */
class PitchShifter {
  /**
   * @param context Web Audio context
   * @param buffer decoded AudioBuffer to play
   * @param bufferSize frames per processing tick
   * @param onEnd called when the source is exhausted
   */
  constructor(context, buffer, bufferSize, onEnd = noop) {
    this._soundtouch = new SoundTouch();
    const source = new WebAudioBufferSource(buffer);
    this.timePlayed = 0;
    this.sourcePosition = 0;
    this._filter = new SimpleFilter(source, this._soundtouch, onEnd);
    this._node = getWebAudioNode(context, this._filter, (sourcePosition) => onUpdate.call(this, sourcePosition), bufferSize);
    this.tempo = 1;
    this.rate = 1;
    this.duration = buffer.duration;
    this.sampleRate = context.sampleRate;
    // Registered event listeners: { name, cb, handler } records, where
    // `handler` is the wrapper actually attached to the node.
    this.listeners = [];
  }
  get formattedDuration() {
    return minsSecs(this.duration);
  }
  get formattedTimePlayed() {
    return minsSecs(this.timePlayed);
  }
  get percentagePlayed() {
    return 100 * this._filter.sourcePosition / (this.duration * this.sampleRate);
  }
  set percentagePlayed(perc) {
    this._filter.sourcePosition = parseInt(perc * this.duration * this.sampleRate);
    this.sourcePosition = this._filter.sourcePosition;
    this.timePlayed = this.sourcePosition / this.sampleRate;
  }
  get node() {
    return this._node;
  }
  set pitch(pitch) {
    this._soundtouch.pitch = pitch;
  }
  set pitchSemitones(semitone) {
    this._soundtouch.pitchSemitones = semitone;
  }
  set rate(rate) {
    this._soundtouch.rate = rate;
  }
  set tempo(tempo) {
    this._soundtouch.tempo = tempo;
  }
  connect(toNode) {
    this._node.connect(toNode);
  }
  disconnect() {
    this._node.disconnect();
  }
  /**
   * Subscribe `cb` to CustomEvents named `eventName`; `cb` receives
   * event.detail.
   */
  on(eventName, cb) {
    // Fix: keep a reference to the wrapper so off() can unregister it.
    // The previous off() built a NEW arrow function and passed that to
    // removeEventListener, which never matches the attached handler, so
    // listeners were never actually removed.
    const handler = (event) => cb(event.detail);
    this.listeners.push({ name: eventName, cb: cb, handler: handler });
    this._node.addEventListener(eventName, handler);
  }
  /**
   * Unsubscribe listeners registered via on(); with no argument, removes
   * all of them, otherwise only those matching `eventName`.
   */
  off(eventName = null) {
    let toRemove = this.listeners;
    if (eventName) {
      toRemove = toRemove.filter((e) => e.name === eventName);
    }
    toRemove.forEach((e) => {
      this._node.removeEventListener(e.name, e.handler);
    });
    this.listeners = this.listeners.filter((e) => !toRemove.includes(e));
  }
}
|
784 |
|
785 | export { AbstractFifoSamplePipe, PitchShifter, RateTransposer, SimpleFilter, SoundTouch, Stretch, WebAudioBufferSource, getWebAudioNode };
|
786 |
|