diff --git a/src/SoundModule/Effectors/Autopanner.ts b/src/SoundModule/Effectors/Autopanner.ts index 0ecb486af..1d81089c2 100644 --- a/src/SoundModule/Effectors/Autopanner.ts +++ b/src/SoundModule/Effectors/Autopanner.ts @@ -18,7 +18,7 @@ export class Autopanner extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.panner = this.context.createStereoPanner(); diff --git a/src/SoundModule/Effectors/BitCrusher.ts b/src/SoundModule/Effectors/BitCrusher.ts index 1f9c366ec..4a16c512d 100644 --- a/src/SoundModule/Effectors/BitCrusher.ts +++ b/src/SoundModule/Effectors/BitCrusher.ts @@ -25,7 +25,7 @@ export class BitCrusher extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.shaper = this.context.createWaveShaper(); this.inputShaper = this.context.createWaveShaper(); diff --git a/src/SoundModule/Effectors/Chorus.ts b/src/SoundModule/Effectors/Chorus.ts index 4da20ba9e..8b80bc150 100644 --- a/src/SoundModule/Effectors/Chorus.ts +++ b/src/SoundModule/Effectors/Chorus.ts @@ -26,7 +26,7 @@ export class Chorus extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.delay = context.createDelay(); this.mix = context.createGain(); diff --git a/src/SoundModule/Effectors/Compressor.ts b/src/SoundModule/Effectors/Compressor.ts index 9f4b63d58..2eeaeedde 100644 --- a/src/SoundModule/Effectors/Compressor.ts +++ b/src/SoundModule/Effectors/Compressor.ts @@ -21,7 +21,7 @@ export class Compressor extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.compressor = context.createDynamicsCompressor(); diff --git a/src/SoundModule/Effectors/Delay.ts b/src/SoundModule/Effectors/Delay.ts index 291231609..7726160ed 100644 --- a/src/SoundModule/Effectors/Delay.ts +++ b/src/SoundModule/Effectors/Delay.ts @@ -27,7 +27,7 @@ export class Delay extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.delay = context.createDelay(Delay.MAX_DELAY_TIME); this.dry = context.createGain(); diff --git a/src/SoundModule/Effectors/Effector.ts b/src/SoundModule/Effectors/Effector.ts index 92167d1d5..82b02ea30 100644 --- a/src/SoundModule/Effectors/Effector.ts +++ b/src/SoundModule/Effectors/Effector.ts @@ -1,5 +1,4 @@ import { Connectable, Statable } from '../../interfaces'; -import { BufferSize } from '../../types'; /** * This class is superclass for effector classes. @@ -17,16 +16,14 @@ export abstract class Effector implements Connectable, Statable { protected lfo: OscillatorNode; protected depth: GainNode; protected rate: AudioParam; - protected processor: ScriptProcessorNode; protected isActive = true; protected paused = true; /** * @param {AudioContext} context This argument is in order to use Web Audio API. - * @param {BufferSize} bufferSize This argument is buffer size for `ScriptProcessorNode`. */ - constructor(context: AudioContext, bufferSize: BufferSize) { + constructor(context: AudioContext) { this.context = context; // for connecting external modules @@ -35,10 +32,9 @@ export abstract class Effector implements Connectable, Statable { // for LFO (Low Frequency Oscillator) // LFO changes parameter cyclically - this.lfo = context.createOscillator(); - this.depth = context.createGain(); - this.rate = this.lfo.frequency; - this.processor = context.createScriptProcessor(bufferSize, 2, 2); + this.lfo = context.createOscillator(); + this.depth = context.createGain(); + this.rate = this.lfo.frequency; } /** diff --git a/src/SoundModule/Effectors/Equalizer.ts b/src/SoundModule/Effectors/Equalizer.ts index 3249e2e41..18fa908e3 100644 --- a/src/SoundModule/Effectors/Equalizer.ts +++ b/src/SoundModule/Effectors/Equalizer.ts @@ -23,7 +23,7 @@ export class Equalizer extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. **/ constructor(context: AudioContext) { - super(context, 0); + super(context); this.bass = context.createBiquadFilter(); this.middle = context.createBiquadFilter(); diff --git a/src/SoundModule/Effectors/Filter.ts b/src/SoundModule/Effectors/Filter.ts index cf0823390..35f0be985 100644 --- a/src/SoundModule/Effectors/Filter.ts +++ b/src/SoundModule/Effectors/Filter.ts @@ -33,7 +33,7 @@ export class Filter extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.filter = context.createBiquadFilter(); diff --git a/src/SoundModule/Effectors/Flanger.ts b/src/SoundModule/Effectors/Flanger.ts index 4a4625d7e..498837ffb 100644 --- a/src/SoundModule/Effectors/Flanger.ts +++ b/src/SoundModule/Effectors/Flanger.ts @@ -26,7 +26,7 @@ export class Flanger extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.delay = context.createDelay(); this.mix = context.createGain(); diff --git a/src/SoundModule/Effectors/Fuzz.ts b/src/SoundModule/Effectors/Fuzz.ts index e4a49a910..b49405981 100644 --- a/src/SoundModule/Effectors/Fuzz.ts +++ b/src/SoundModule/Effectors/Fuzz.ts @@ -28,7 +28,7 @@ export class Fuzz extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.positiveShaper = this.context.createWaveShaper(); this.negativeShaper = this.context.createWaveShaper(); diff --git a/src/SoundModule/Effectors/Listener.ts b/src/SoundModule/Effectors/Listener.ts index 9d7189979..e46fc73cb 100644 --- a/src/SoundModule/Effectors/Listener.ts +++ b/src/SoundModule/Effectors/Listener.ts @@ -29,7 +29,7 @@ export class Listener extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); // instance of `AudioListener` this.listener = context.listener; diff --git a/src/SoundModule/Effectors/NoiseGate.ts b/src/SoundModule/Effectors/NoiseGate.ts index 08c093ca7..c2f48b083 100644 --- a/src/SoundModule/Effectors/NoiseGate.ts +++ b/src/SoundModule/Effectors/NoiseGate.ts @@ -1,4 +1,5 @@ import { Effector } from './Effector'; +import { NoiseGateProcessor } from './AudioWorkletProcessors/NoiseGateProcessor'; export type NoiseGateParams = { state?: boolean, @@ -11,55 +12,26 @@ export type NoiseGateParams = { * @extends {Effector} */ export class NoiseGate extends Effector { + private processor: AudioWorkletNode; + private level = 0; /** * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); + this.processor = new AudioWorkletNode(this.context, NoiseGateProcessor.name); this.activate(); } /** @override */ public override start(): void { - if (!this.isActive || !this.paused) { - return; - } - - this.paused = false; - - const bufferSize = this.processor.bufferSize; - - this.processor.onaudioprocess = (event: AudioProcessingEvent) => { - const inputLs = event.inputBuffer.getChannelData(0); - const inputRs = event.inputBuffer.getChannelData(1); - const outputLs = event.outputBuffer.getChannelData(0); - const outputRs = event.outputBuffer.getChannelData(1); - - for (let i = 0; i < bufferSize; i++) { - outputLs[i] = this.gate(inputLs[i]); - outputRs[i] = this.gate(inputRs[i]); - } - }; } /** @override */ public override stop(): void { - // Effector's state is active ? - if (!this.isActive) { - return; - } - - this.paused = true; - - // Stop `onaudioprocess` event - this.processor.disconnect(0); - this.processor.onaudioprocess = null; - - // Connect `AudioNode`s again - this.connect(); } /** @override */ @@ -68,8 +40,16 @@ export class NoiseGate extends Effector { this.input.disconnect(0); this.processor.disconnect(0); + this.processor = new AudioWorkletNode(this.context, NoiseGateProcessor.name); + + const message: NoiseGateParams = { + level: this.level + }; + + this.processor.port.postMessage(message); + if (this.isActive) { - // GainNode (Input) -> ScriptProcessorNode (Noise Gate) -> GainNode (Output); + // GainNode (Input) -> AudioWorkletNode (Noise Gate) -> GainNode (Output); this.input.connect(this.processor); this.processor.connect(this.output); } else { @@ -106,12 +86,24 @@ export class NoiseGate extends Effector { case 'state': if (typeof value === 'boolean') { this.isActive = value; + + if (this.processor) { + const message: NoiseGateParams = { state: value }; + + this.processor.port.postMessage(message); + } } break; case 'level': if (typeof value === 'number') { this.level = value; + + if (this.processor) { + const message: NoiseGateParams = { level: value }; + + this.processor.port.postMessage(message); + } } break; @@ -142,19 +134,4 @@ export class NoiseGate extends Effector { super.deactivate(); return this; } - - /** - * This method detects background noise and removes this. - * @param {number} data This argument is amplitude (between -1 and 1). - * @return {number} Return value is `0` or raw data. - */ - private gate(data: number): number { - if (!this.isActive) { - return data; - } - - // data : Amplitude is equal to argument. - // 0 : Because signal is detected as background noise, amplitude is `0`. - return (Math.abs(data) > this.level) ? data : 0; - } } diff --git a/src/SoundModule/Effectors/NoiseSuppressor.ts b/src/SoundModule/Effectors/NoiseSuppressor.ts index dc74b425c..09f33b858 100644 --- a/src/SoundModule/Effectors/NoiseSuppressor.ts +++ b/src/SoundModule/Effectors/NoiseSuppressor.ts @@ -1,6 +1,5 @@ -import { BufferSize } from '../../types'; -import { fft, ifft } from '../../XSound'; import { Effector } from './Effector'; +import { NoiseSuppressorProcessor } from './AudioWorkletProcessors/NoiseSuppressorProcessor'; export type NoiseSuppressorParams = { state?: boolean, @@ -13,54 +12,26 @@ export type NoiseSuppressorParams = { * @extends {Effector} */ export class NoiseSuppressor extends Effector { + private processor: AudioWorkletNode; + private threshold = 0; /** * @param {AudioContext} context This argument is in order to use Web Audio API. - * @param {BufferSize} bufferSize This argument is buffer size for `ScriptProcessorNode`. */ - constructor(context: AudioContext, bufferSize: BufferSize) { - super(context, bufferSize); + constructor(context: AudioContext) { + super(context); + this.processor = new AudioWorkletNode(this.context, NoiseSuppressorProcessor.name); this.activate(); } /** @override */ public override start(): void { - if (!this.isActive || !this.paused) { - return; - } - - this.paused = false; - - const bufferSize = this.processor.bufferSize; - - this.processor.onaudioprocess = (event: AudioProcessingEvent) => { - const inputLs = event.inputBuffer.getChannelData(0); - const inputRs = event.inputBuffer.getChannelData(1); - const outputLs = event.outputBuffer.getChannelData(0); - const outputRs = event.outputBuffer.getChannelData(1); - - this.suppress(inputLs, outputLs, bufferSize); - this.suppress(inputRs, outputRs, bufferSize); - }; } /** @override */ public override stop(): void { - // Effector's state is active ? - if (!this.isActive) { - return; - } - - this.paused = true; - - // Stop `onaudioprocess` event - this.processor.disconnect(0); - this.processor.onaudioprocess = null; - - // Connect `AudioNode`s again - this.connect(); } /** @override */ @@ -69,8 +40,16 @@ export class NoiseSuppressor extends Effector { this.input.disconnect(0); this.processor.disconnect(0); + this.processor = new AudioWorkletNode(this.context, NoiseSuppressorProcessor.name); + + const message: NoiseSuppressorParams = { + threshold: this.threshold + }; + + this.processor.port.postMessage(message); + if (this.isActive) { - // GainNode (Input) -> ScriptProcessorNode (Noise Suppressor) -> GainNode (Output); + // GainNode (Input) -> AudioWorkletNode (Noise Suppressor) -> GainNode (Output); this.input.connect(this.processor); this.processor.connect(this.output); } else { @@ -107,6 +86,12 @@ export class NoiseSuppressor extends Effector { case 'state': if (typeof value === 'boolean') { this.isActive = value; + + if (this.processor) { + const message: NoiseSuppressorParams = { state: value }; + + this.processor.port.postMessage(message); + } } break; @@ -114,6 +99,12 @@ export class NoiseSuppressor extends Effector { if (typeof value === 'number') { if (value >= 0) { this.threshold = value; + + if (this.processor) { + const message: NoiseSuppressorParams = { threshold: value }; + + this.processor.port.postMessage(message); + } } } @@ -145,53 +136,4 @@ export class NoiseSuppressor extends Effector { super.deactivate(); return this; } - - /** - * This method detects background noise and removes this. - * @param {Float32Array} inputs This argument is instance of `Float32Array` for FFT/IFFT. - * @param {Float32Array} outputs This argument is instance of `Float32Array` for FFT/IFFT. - * @param {number} fftSize This argument is FFT/IFFT size (power of two). - */ - private suppress(inputs: Float32Array, outputs: Float32Array, fftSize: number): void { - if (!this.isActive || (this.threshold === 0)) { - outputs.set(inputs); - return; - } - - const xreals = new Float32Array(inputs); - const ximags = new Float32Array(fftSize); - - const yreals = new Float32Array(fftSize); - const yimags = new Float32Array(fftSize); - - const amplitudes = new Float32Array(fftSize); - const phases = new Float32Array(fftSize); - - fft(xreals, ximags, fftSize); - - for (let k = 0; k < fftSize; k++) { - amplitudes[k] = Math.sqrt((xreals[k] ** 2) + (ximags[k] ** 2)); - - if ((xreals[k] !== 0) && (ximags[k] !== 0)) { - phases[k] = Math.atan2(ximags[k], xreals[k]); - } - } - - for (let k = 0; k < fftSize; k++) { - amplitudes[k] -= this.threshold; - - if (amplitudes[k] < 0) { - amplitudes[k] = 0; - } - } - - for (let k = 0; k < fftSize; k++) { - yreals[k] = amplitudes[k] * Math.cos(phases[k]); - yimags[k] = amplitudes[k] * Math.sin(phases[k]); - } - - ifft(yreals, yimags, fftSize); - - outputs.set(yreals); - } } diff --git a/src/SoundModule/Effectors/OverDrive.ts b/src/SoundModule/Effectors/OverDrive.ts index 7687decd4..6602e8d77 100644 --- a/src/SoundModule/Effectors/OverDrive.ts +++ b/src/SoundModule/Effectors/OverDrive.ts @@ -26,7 +26,7 @@ export class OverDrive extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.shaper = this.context.createWaveShaper(); this.inputShaper = this.context.createWaveShaper(); diff --git a/src/SoundModule/Effectors/Panner.ts b/src/SoundModule/Effectors/Panner.ts index b5d2f60d4..e54e91601 100644 --- a/src/SoundModule/Effectors/Panner.ts +++ b/src/SoundModule/Effectors/Panner.ts @@ -38,7 +38,7 @@ export class Panner extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.panner = context.createPanner(); diff --git a/src/SoundModule/Effectors/Phaser.ts b/src/SoundModule/Effectors/Phaser.ts index 3466ad032..f8ac75d55 100644 --- a/src/SoundModule/Effectors/Phaser.ts +++ b/src/SoundModule/Effectors/Phaser.ts @@ -29,7 +29,7 @@ export class Phaser extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); for (let i = 0; i < Phaser.MAX_STAGES; i++) { const filter = context.createBiquadFilter(); diff --git a/src/SoundModule/Effectors/PitchShifter.ts b/src/SoundModule/Effectors/PitchShifter.ts index d4fe592ae..7761ba602 100644 --- a/src/SoundModule/Effectors/PitchShifter.ts +++ b/src/SoundModule/Effectors/PitchShifter.ts @@ -1,6 +1,5 @@ -import { BufferSize } from '../../types'; -import { fft, ifft } from '../../XSound'; import { Effector } from './Effector'; +import { PitchShifterProcessor } from './AudioWorkletProcessors/PitchShifterProcessor'; export type PitchShifterParams = { state?: boolean, @@ -13,97 +12,26 @@ export type PitchShifterParams = { * @extends {Effector} */ export class PitchShifter extends Effector { - public static readonly GAIN_CORRECTION = 2.0 as const; + private processor: AudioWorkletNode; - private pitch = 1.0; + private pitch = 1; /** * @param {AudioContext} context This argument is in order to use Web Audio API. - * @param {BufferSize} bufferSize This argument is buffer size for `ScriptProcessorNode`. */ - constructor(context: AudioContext, bufferSize: BufferSize) { - super(context, bufferSize); + constructor(context: AudioContext) { + super(context); - // `PitchShifter` is not connected by default - this.deactivate(); - - this.connect(); + this.processor = new AudioWorkletNode(this.context, PitchShifterProcessor.name); + this.activate(); } /** @override */ public override start(): void { - if (!this.isActive || !this.paused) { - return; - } - - this.paused = false; - - const bufferSize = this.processor.bufferSize; - - this.processor.onaudioprocess = (event: AudioProcessingEvent) => { - const inputLs = event.inputBuffer.getChannelData(0); - const inputRs = event.inputBuffer.getChannelData(1); - const outputLs = event.outputBuffer.getChannelData(0); - const outputRs = event.outputBuffer.getChannelData(1); - - if (this.isActive && (this.pitch !== 1)) { - const realLs = new Float32Array(inputLs); - const realRs = new Float32Array(inputRs); - const imagLs = new Float32Array(bufferSize); - const imagRs = new Float32Array(bufferSize); - - fft(realLs, imagLs, bufferSize); - fft(realRs, imagRs, bufferSize); - - const arealLs = new Float32Array(bufferSize); - const arealRs = new Float32Array(bufferSize); - const aimagLs = new Float32Array(bufferSize); - const aimagRs = new Float32Array(bufferSize); - - for (let i = 0; i < bufferSize; i++) { - const offset = Math.trunc(this.pitch * i); - - let eq = 1; - - if (i > (bufferSize / 2)) { - eq = 0; - } - - if ((offset >= 0) && (offset < bufferSize)) { - arealLs[offset] += PitchShifter.GAIN_CORRECTION * eq * realLs[i]; - aimagLs[offset] += PitchShifter.GAIN_CORRECTION * eq * imagLs[i]; - arealRs[offset] += PitchShifter.GAIN_CORRECTION * eq * realRs[i]; - aimagRs[offset] += PitchShifter.GAIN_CORRECTION * eq * imagRs[i]; - } - } - - ifft(arealLs, aimagLs, bufferSize); - ifft(arealRs, aimagRs, bufferSize); - - outputLs.set(arealLs); - outputRs.set(arealRs); - } else { - outputLs.set(inputLs); - outputRs.set(inputRs); - } - }; } /** @override */ public override stop(): void { - // Effector's state is active ? - if (!this.isActive) { - return; - } - - this.paused = true; - - // Stop `onaudioprocess` event - this.processor.disconnect(0); - this.processor.onaudioprocess = null; - - // Connect `AudioNode`s again - this.connect(); } /** @override */ @@ -112,8 +40,16 @@ export class PitchShifter extends Effector { this.input.disconnect(0); this.processor.disconnect(0); + this.processor = new AudioWorkletNode(this.context, PitchShifterProcessor.name); + + const message: PitchShifterParams = { + pitch: this.pitch + }; + + this.processor.port.postMessage(message); + if (this.isActive) { - // GainNode (Input) -> ScriptProcessorNode (Pitch Shifter) -> GainNode (Output); + // GainNode (Input) -> AudioWorkletNode (Pitch Shifter) -> GainNode (Output); this.input.connect(this.processor); this.processor.connect(this.output); } else { @@ -151,6 +87,12 @@ export class PitchShifter extends Effector { case 'state': if (typeof value === 'boolean') { this.isActive = value; + + if (this.processor) { + const message: PitchShifterParams = { state: value }; + + this.processor.port.postMessage(message); + } } break; @@ -158,6 +100,12 @@ export class PitchShifter extends Effector { if (typeof value === 'number') { if (value > 0) { this.pitch = value; + + if (this.processor) { + const message: PitchShifterParams = { pitch: value }; + + this.processor.port.postMessage(message); + } } } diff --git a/src/SoundModule/Effectors/Preamp.ts b/src/SoundModule/Effectors/Preamp.ts index 95279fad3..0bd8e2151 100644 --- a/src/SoundModule/Effectors/Preamp.ts +++ b/src/SoundModule/Effectors/Preamp.ts @@ -49,7 +49,7 @@ export class PreEqualizer extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.shaper = context.createWaveShaper(); @@ -214,7 +214,7 @@ export class PostEqualizer extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.shaper = context.createWaveShaper(); @@ -393,7 +393,7 @@ export class Cabinet extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.lowpass = context.createBiquadFilter(); this.notch = context.createBiquadFilter(); @@ -529,7 +529,7 @@ export class Preamp extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.preEQ = new PreEqualizer(context); this.postEQ = new PostEqualizer(context); diff --git a/src/SoundModule/Effectors/Reverb.ts b/src/SoundModule/Effectors/Reverb.ts index 41d913fc5..4b05d46cc 100644 --- a/src/SoundModule/Effectors/Reverb.ts +++ b/src/SoundModule/Effectors/Reverb.ts @@ -31,7 +31,7 @@ export class Reverb extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.convolver = context.createConvolver(); this.dry = context.createGain(); diff --git a/src/SoundModule/Effectors/Ringmodulator.ts b/src/SoundModule/Effectors/Ringmodulator.ts index df91baef9..03ba29762 100644 --- a/src/SoundModule/Effectors/Ringmodulator.ts +++ b/src/SoundModule/Effectors/Ringmodulator.ts @@ -18,7 +18,7 @@ export class Ringmodulator extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.amplitude = context.createGain(); diff --git a/src/SoundModule/Effectors/Stereo.ts b/src/SoundModule/Effectors/Stereo.ts index 7c043340e..b8ada6f9e 100644 --- a/src/SoundModule/Effectors/Stereo.ts +++ b/src/SoundModule/Effectors/Stereo.ts @@ -1,5 +1,5 @@ -import { BufferSize } from '../../types'; import { Effector } from './Effector'; +import { StereoProcessor } from './AudioWorkletProcessors/StereoProcessor'; export type StereoParams = { state?: boolean, @@ -14,6 +14,8 @@ export type StereoParams = { export class Stereo extends Effector { public static MAX_DELAY_TIME = 1; // Max delay time is 1000 [ms] + private processor: AudioWorkletNode; + private splitter: ChannelSplitterNode; private merger: ChannelMergerNode; private delayL: DelayNode; @@ -21,13 +23,12 @@ export class Stereo extends Effector { /** * @param {AudioContext} context This argument is in order to use Web Audio API. - * @param {BufferSize} bufferSize This argument is buffer size for `ScriptProcessorNode`. */ - constructor(context: AudioContext, bufferSize: BufferSize) { - super(context, bufferSize); + constructor(context: AudioContext) { + super(context); this.splitter = context.createChannelSplitter(2); - this.merger = context.createScriptProcessor(bufferSize, 2, 2); + this.merger = context.createChannelMerger(2); this.delayL = context.createDelay(Stereo.MAX_DELAY_TIME); this.delayR = context.createDelay(Stereo.MAX_DELAY_TIME); @@ -35,53 +36,16 @@ export class Stereo extends Effector { this.delayL.delayTime.value = 0; this.delayR.delayTime.value = 0; - // `Stereo` is not connected by default + this.processor = new AudioWorkletNode(this.context, StereoProcessor.name); this.deactivate(); } /** @override */ public override start(): void { - if (!this.isActive || !this.paused) { - return; - } - - this.paused = false; - - const bufferSize = this.processor.bufferSize; - - this.processor.onaudioprocess = (event: AudioProcessingEvent) => { - const inputLs = event.inputBuffer.getChannelData(0); - const inputRs = event.inputBuffer.getChannelData(1); - const outputLs = event.outputBuffer.getChannelData(0); - const outputRs = event.outputBuffer.getChannelData(1); - - if (this.isActive && (this.delayL.delayTime.value !== 0) && (this.delayR.delayTime.value !== 0)) { - for (let i = 0; i < bufferSize; i++) { - outputLs[i] = inputLs[i]; - outputRs[i] = -inputRs[i]; - } - } else { - outputLs.set(inputLs); - outputRs.set(inputRs); - } - }; } /** @override */ public override stop(): void { - // Effector's state is active ? - if (!this.isActive) { - return; - } - - this.paused = true; - - // Stop `onaudioprocess` event - this.processor.disconnect(0); - this.processor.onaudioprocess = null; - - // Connect `AudioNode`s again - this.connect(); } /** @override */ @@ -92,16 +56,26 @@ export class Stereo extends Effector { this.delayL.disconnect(0); this.delayR.disconnect(0); this.merger.disconnect(0); + this.processor.disconnect(0); + + this.processor = new AudioWorkletNode(this.context, StereoProcessor.name); + + const message: StereoParams = { + time: this.delayL.delayTime.value + }; + + this.processor.port.postMessage(message); if (this.isActive) { // Effect ON - // GainNode (Input) -> ChannelSplitterNode -> DelayNode (L) / (R) -> ScriptProcessorNode (Stereo) -> GainNode (Output) + // GainNode (Input) -> ChannelSplitterNode -> DelayNode (L) / (R) -> AudioWorkletNode (Stereo) -> ChannelMergerNode -> GainNode (Output) this.input.connect(this.splitter); this.splitter.connect(this.delayL, 0, 0); this.splitter.connect(this.delayR, 1, 0); - this.delayL.connect(this.merger); - this.delayR.connect(this.merger); + this.delayL.connect(this.processor); + this.delayR.connect(this.processor); + this.processor.connect(this.merger); this.merger.connect(this.output); } else { // Effect OFF @@ -140,6 +114,12 @@ export class Stereo extends Effector { case 'state': if (typeof value === 'boolean') { this.isActive = value; + + if (this.processor) { + const message: StereoParams = { state: value }; + + this.processor.port.postMessage(message); + } } break; diff --git a/src/SoundModule/Effectors/Tremolo.ts b/src/SoundModule/Effectors/Tremolo.ts index 4dabaac37..0d5875827 100644 --- a/src/SoundModule/Effectors/Tremolo.ts +++ b/src/SoundModule/Effectors/Tremolo.ts @@ -19,7 +19,7 @@ export class Tremolo extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.amplitude = context.createGain(); diff --git a/src/SoundModule/Effectors/VocalCanceler.ts b/src/SoundModule/Effectors/VocalCanceler.ts index 70f847b91..3608468ed 100644 --- a/src/SoundModule/Effectors/VocalCanceler.ts +++ b/src/SoundModule/Effectors/VocalCanceler.ts @@ -1,5 +1,5 @@ -import { BufferSize } from '../../types'; import { Effector } from './Effector'; +import { VocalCancelerProcessor } from './AudioWorkletProcessors/VocalCancelerProcessor'; export type VocalCancelerParams = { state?: boolean, @@ -12,56 +12,26 @@ export type VocalCancelerParams = { * @extends {Effector} */ export class VocalCanceler extends Effector { + private processor: AudioWorkletNode; + /** * @param {AudioContext} context This argument is in order to use Web Audio API. - * @param {BufferSize} bufferSize This argument is buffer size for `ScriptProcessorNode`. */ - constructor(context: AudioContext, bufferSize: BufferSize) { - super(context, bufferSize); + constructor(context: AudioContext) { + super(context); this.depth.gain.value = 0; + this.processor = new AudioWorkletNode(this.context, VocalCancelerProcessor.name); this.activate(); } /** @override */ public override start(): void { - if (!this.isActive || !this.paused) { - return; - } - - this.paused = false; - - const bufferSize = this.processor.bufferSize; - - this.processor.onaudioprocess = (event: AudioProcessingEvent) => { - const inputLs = event.inputBuffer.getChannelData(0); - const inputRs = event.inputBuffer.getChannelData(1); - const outputLs = event.outputBuffer.getChannelData(0); - const outputRs = event.outputBuffer.getChannelData(1); - - for (let i = 0; i < bufferSize; i++) { - outputLs[i] = this.cancel(inputLs[i], inputRs[i]); - outputRs[i] = this.cancel(inputRs[i], inputLs[i]); - } - }; } /** @override */ public override stop(): void { - // Effector's state is active ? - if (!this.isActive) { - return; - } - - this.paused = true; - - // Stop `onaudioprocess` event - this.processor.disconnect(0); - this.processor.onaudioprocess = null; - - // Connect `AudioNode`s again - this.connect(); } /** @override */ @@ -70,8 +40,16 @@ export class VocalCanceler extends Effector { this.input.disconnect(0); this.processor.disconnect(0); + this.processor = new AudioWorkletNode(this.context, VocalCancelerProcessor.name); + + const message: VocalCancelerParams = { + depth: this.depth.gain.value + }; + + this.processor.port.postMessage(message); + if (this.isActive) { - // GainNode (Input) -> ScriptProcessorNode (Vocal Canceler) -> GainNode (Output); + // GainNode (Input) -> AudioWorkletNode (Vocal Canceler) -> GainNode (Output); this.input.connect(this.processor); this.processor.connect(this.output); } else { @@ -81,6 +59,7 @@ export class VocalCanceler extends Effector { return this.output; } + /** * This method gets or sets parameters for vocal canceler. * @param {keyof VocalCancelerParams|VocalCancelerParams} params This argument is string if getter. Otherwise, setter. @@ -107,12 +86,24 @@ export class VocalCanceler extends Effector { case 'state': if (typeof value === 'boolean') { this.isActive = value; + + if (this.processor) { + const message: VocalCancelerParams = { state: value }; + + this.processor.port.postMessage(message); + } } break; case 'depth': if (typeof value === 'number') { this.depth.gain.value = value; + + if (this.processor) { + const message: VocalCancelerParams = { depth: value }; + + this.processor.port.postMessage(message); + } } break; @@ -146,18 +137,4 @@ export class VocalCanceler extends Effector { super.deactivate(); return this; } - - /** - * This method removes vocal part from audio on playing. - * @param {number} dataL This argument is gain level for Left channel. - * @param {number} dataR This argument is gain level for Right channel. - * @return {number} Return value is audio data except vocal part. - */ - private cancel(dataL: number, dataR: number): number { - if (this.isActive) { - return dataL - (this.depth.gain.value * dataR); - } - - return dataL; - } } diff --git a/src/SoundModule/Effectors/Wah.ts b/src/SoundModule/Effectors/Wah.ts index 60fc4952a..8c21345c8 100644 --- a/src/SoundModule/Effectors/Wah.ts +++ b/src/SoundModule/Effectors/Wah.ts @@ -27,7 +27,7 @@ export class Wah extends Effector { * @param {AudioContext} context This argument is in order to use Web Audio API. */ constructor(context: AudioContext) { - super(context, 0); + super(context); this.lowpass = context.createBiquadFilter(); this.envelopeFollower = context.createWaveShaper(); diff --git a/src/SoundModule/index.ts b/src/SoundModule/index.ts index 8e1cbca45..c0f806415 100644 --- a/src/SoundModule/index.ts +++ b/src/SoundModule/index.ts @@ -189,17 +189,17 @@ export abstract class SoundModule implements Connectable { this.fuzz = new Fuzz(context); this.listener = new Listener(context); this.noisegate = new NoiseGate(context); - this.noisesuppressor = new NoiseSuppressor(context, bufferSize); + this.noisesuppressor = new NoiseSuppressor(context); this.overdrive = new OverDrive(context); this.panner = new Panner(context); this.phaser = new Phaser(context); - this.pitchshifter = new PitchShifter(context, bufferSize); + this.pitchshifter = new PitchShifter(context); this.preamp = new Preamp(context); this.reverb = new Reverb(context); this.ringmodulator = new Ringmodulator(context); - this.stereo = new Stereo(context, bufferSize); + this.stereo = new Stereo(context); this.tremolo = new Tremolo(context); - this.vocalcanceler = new VocalCanceler(context, bufferSize); + this.vocalcanceler = new VocalCanceler(context); this.wah = new Wah(context); // The default order for connection @@ -338,7 +338,6 @@ export abstract class SoundModule implements Connectable { this.overdrive.start(s); this.phaser.start(s); this.ringmodulator.start(s); - this.stereo.start(); this.tremolo.start(s); this.wah.start(s); @@ -362,7 +361,6 @@ export abstract class SoundModule implements Connectable { this.overdrive.stop(s); this.phaser.stop(s); this.ringmodulator.stop(s); - this.stereo.stop(); this.tremolo.stop(s); this.wah.stop(s); @@ -509,17 +507,17 @@ export abstract class SoundModule implements Connectable { this.fuzz = new Fuzz(context); this.listener = new Listener(context); this.noisegate = new NoiseGate(context); - this.noisesuppressor = new NoiseSuppressor(context, bufferSize); + this.noisesuppressor = new NoiseSuppressor(context); this.overdrive = new OverDrive(context); this.panner = new Panner(context); this.phaser = new Phaser(context); - this.pitchshifter = new PitchShifter(context, bufferSize); + this.pitchshifter = new PitchShifter(context); this.preamp = new Preamp(context); this.reverb = new Reverb(context); this.ringmodulator = new Ringmodulator(context); - this.stereo = new Stereo(context, bufferSize); + this.stereo = new Stereo(context); this.tremolo = new Tremolo(context); - this.vocalcanceler = new VocalCanceler(context, bufferSize); + this.vocalcanceler = new VocalCanceler(context); this.wah = new Wah(context); // The default order for connection diff --git a/test/SoundModule/Effectors/NoiseGate.test.ts b/test/SoundModule/Effectors/NoiseGate.test.ts index d982d5f24..c0731402a 100644 --- a/test/SoundModule/Effectors/NoiseGate.test.ts +++ b/test/SoundModule/Effectors/NoiseGate.test.ts @@ -7,27 +7,6 @@ describe(NoiseGate.name, () => { // @ts-ignore const noisegate = new NoiseGate(context); - // eslint-disable-next-line dot-notation - describe(noisegate['gate'].name, () => { - beforeAll(() => { - noisegate.param({ level: 0.002 }); - }); - - test('should return raw data', () => { - /* eslint-disable dot-notation */ - expect(noisegate['gate'](0.005)).toBeCloseTo(0.005, 3); - expect(noisegate['gate'](-0.005)).toBeCloseTo(-0.005, 3); - /* eslint-enable dot-notation */ - }); - - test('should return `0`', () => { - /* eslint-disable dot-notation */ - expect(noisegate['gate'](0.002)).toBeCloseTo(0, 3); - expect(noisegate['gate'](-0.002)).toBeCloseTo(0, 3); - /* eslint-enable dot-notation */ - }); - }); - describe(noisegate.param.name, () => { const defaultParams: NoiseGateParams = { level: 0 @@ -82,7 +61,13 @@ describe(NoiseGate.name, () => { }); describe(noisegate.deactivate.name, () => { - test('should call `connect` method and stop `onaudioprocess` event handler', () => { + test('should call `connect` method', () => { + // HACK: + // eslint-disable-next-line dot-notation + if (noisegate['processor'] === null) { + return; + } + const originalConnect = noisegate.connect; // eslint-disable-next-line dot-notation @@ -98,10 +83,7 @@ describe(NoiseGate.name, () => { noisegate.deactivate(); - expect(connectMock).toHaveBeenCalledTimes(2); - - // eslint-disable-next-line dot-notation - expect(noisegate['processor'].onaudioprocess).toBe(null); + expect(connectMock).toHaveBeenCalledTimes(1); noisegate.connect = originalConnect; diff --git a/test/SoundModule/Effectors/NoiseSuppressor.test.ts b/test/SoundModule/Effectors/NoiseSuppressor.test.ts index b26223183..a28515955 100644 --- a/test/SoundModule/Effectors/NoiseSuppressor.test.ts +++ b/test/SoundModule/Effectors/NoiseSuppressor.test.ts @@ -5,37 +5,7 @@ describe(NoiseSuppressor.name, () => { const context = new AudioContextMock(); // @ts-ignore - const noisesuppressor = new NoiseSuppressor(context, 2048); - - // eslint-disable-next-line dot-notation - describe(noisesuppressor['suppress'].name, () => { - const bufferSize = 8; - const inputs = new Float32Array([0.5, 0.25, 0, -0.25, -0.5, -0.25, 0, 0.25]); - const outputs = new Float32Array(bufferSize); - - test('should return raw data (if threshold is `0`)', () => { - // eslint-disable dot-notation - noisesuppressor['suppress'](inputs, outputs, bufferSize); - - expect(outputs).toStrictEqual(inputs); - }); - - test('should return sound data that background noise is removed from', () => { - noisesuppressor.param({ threshold: 0.3 }); - - // eslint-disable dot-notation - noisesuppressor['suppress'](inputs, outputs, bufferSize); - - expect(outputs[0]).toBeCloseTo(0.35177671909332275, 5); - expect(outputs[1]).toBeCloseTo(0.2487436980009079, 5); - expect(outputs[2]).toBeCloseTo(0, 5); - expect(outputs[3]).toBeCloseTo(-0.2487436980009079, 5); - expect(outputs[4]).toBeCloseTo(-0.35177671909332275, 5); - expect(outputs[5]).toBeCloseTo(-0.2487436980009079, 5); - expect(outputs[6]).toBeCloseTo(0, 5); - expect(outputs[7]).toBeCloseTo(0.2487436980009079, 5); - }); - }); + const noisesuppressor = new NoiseSuppressor(context); describe(noisesuppressor.param.name, () => { const defaultParams: NoiseSuppressorParams = { @@ -91,7 +61,13 @@ describe(NoiseSuppressor.name, () => { }); describe(noisesuppressor.deactivate.name, () => { - test('should call `connect` method and stop `onaudioprocess` event handler', () => { + test('should call `connect` method', () => { + // HACK: + // eslint-disable-next-line dot-notation + if (noisesuppressor['processor'] === null) { + return; + } + const originalConnect = noisesuppressor.connect; // eslint-disable-next-line dot-notation @@ -107,10 +83,7 @@ describe(NoiseSuppressor.name, () => { noisesuppressor.deactivate(); - expect(connectMock).toHaveBeenCalledTimes(2); - - // eslint-disable-next-line dot-notation - expect(noisesuppressor['processor'].onaudioprocess).toBe(null); + expect(connectMock).toHaveBeenCalledTimes(1); noisesuppressor.connect = originalConnect; diff --git a/test/SoundModule/Effectors/PitchShifter.test.ts b/test/SoundModule/Effectors/PitchShifter.test.ts index a8bab7c2c..e4c919a1b 100644 --- a/test/SoundModule/Effectors/PitchShifter.test.ts +++ b/test/SoundModule/Effectors/PitchShifter.test.ts @@ -5,48 +5,7 @@ describe(PitchShifter.name, () => { const context = new AudioContextMock(); // @ts-ignore - const pitchshifter = new PitchShifter(context, 2048); - - describe(pitchshifter.start.name, () => { - test('should be `false` after start', () => { - // eslint-disable-next-line dot-notation - expect(pitchshifter['paused']).toBe(true); - - pitchshifter.activate(); - pitchshifter.start(); - - // eslint-disable-next-line dot-notation - expect(pitchshifter['paused']).toBe(false); - - pitchshifter.stop(); - pitchshifter.deactivate(); - }); - }); - - describe(pitchshifter.stop.name, () => { - test('should call `disconnect` method and stop `onaudioprocess` event handler', () => { - // eslint-disable-next-line dot-notation - const originalProcessor = pitchshifter['processor']; - - const disconnectMock = jest.fn(); - - // eslint-disable-next-line dot-notation - pitchshifter['processor'].disconnect = disconnectMock; - - pitchshifter.activate(); - pitchshifter.stop(); - - expect(disconnectMock).toHaveBeenCalledTimes(3); - - // eslint-disable-next-line dot-notation - expect(pitchshifter['processor'].onaudioprocess).toBe(null); - - // eslint-disable-next-line dot-notation - pitchshifter['processor'] = originalProcessor; - - pitchshifter.deactivate(); - }); - }); + const pitchshifter = new PitchShifter(context); describe(pitchshifter.connect.name, () => { /* eslint-disable dot-notation */ @@ -64,6 +23,12 @@ describe(PitchShifter.name, () => { }); test('should call `connect` method', () => { + // HACK: + // eslint-disable-next-line dot-notation + if (pitchshifter['processor'] === null) { + return; + } + const inputConnectMock = jest.fn(); const inputDisconnectMock = jest.fn(); const processorConnectMock = jest.fn(); @@ -86,15 +51,15 @@ describe(PitchShifter.name, () => { pitchshifter.activate(); expect(inputConnectMock).toHaveBeenCalledTimes(2); - expect(processorConnectMock).toHaveBeenCalledTimes(1); + expect(processorConnectMock).toHaveBeenCalledTimes(0); expect(inputDisconnectMock).toHaveBeenCalledTimes(2); - expect(processorDisconnectMock).toHaveBeenCalledTimes(2); + expect(processorDisconnectMock).toHaveBeenCalledTimes(1); }); }); describe(pitchshifter.param.name, () => { const defaultParams: PitchShifterParams = { - pitch: 1.0 + pitch: 1 }; const params: PitchShifterParams = { @@ -124,7 +89,7 @@ describe(PitchShifter.name, () => { test('should return parameters for pitch shifter as associative array', () => { expect(pitchshifter.params()).toStrictEqual({ state: false, - pitch: 1.0 + pitch: 1 }); }); }); @@ -146,7 +111,13 @@ describe(PitchShifter.name, () => { }); describe(pitchshifter.deactivate.name, () => { - test('should call `connect` method and stop `onaudioprocess` event handler', () => { + test('should call `connect` method', () => { + // HACK: + // eslint-disable-next-line dot-notation + if (pitchshifter['processor'] === null) { + return; + } + const originalConnect = pitchshifter.connect; // eslint-disable-next-line dot-notation @@ -162,10 +133,7 @@ describe(PitchShifter.name, () => { pitchshifter.deactivate(); - expect(connectMock).toHaveBeenCalledTimes(2); - - // eslint-disable-next-line dot-notation - expect(pitchshifter['processor'].onaudioprocess).toBe(null); + expect(connectMock).toHaveBeenCalledTimes(1); pitchshifter.connect = originalConnect; diff --git a/test/SoundModule/Effectors/Stereo.test.ts b/test/SoundModule/Effectors/Stereo.test.ts index 60e78532e..f38d20e36 100644 --- a/test/SoundModule/Effectors/Stereo.test.ts +++ b/test/SoundModule/Effectors/Stereo.test.ts @@ -5,48 +5,7 @@ describe(Stereo.name, () => { const context = new AudioContextMock(); // @ts-ignore - const stereo = new Stereo(context, 2048); - - describe(stereo.start.name, () => { - test('should be false after start', () => { - // eslint-disable-next-line dot-notation - expect(stereo['paused']).toBe(true); - - stereo.activate(); - stereo.start(); - - // eslint-disable-next-line dot-notation - expect(stereo['paused']).toBe(false); - - stereo.stop(); - stereo.deactivate(); - }); - }); - - describe(stereo.stop.name, () => { - test('should call `disconnect` method and stop `onaudioprocess` event handler', () => { - // eslint-disable-next-line dot-notation - const originalProcessor = stereo['processor']; - - const disconnectMock = jest.fn(); - - // eslint-disable-next-line dot-notation - stereo['processor'].disconnect = disconnectMock; - - stereo.activate(); - stereo.stop(); - - expect(disconnectMock).toHaveBeenCalledTimes(1); - - // eslint-disable-next-line dot-notation - expect(stereo['processor'].onaudioprocess).toBe(null); - - // eslint-disable-next-line dot-notation - stereo['processor'] = originalProcessor; - - stereo.deactivate(); - }); - }); + const stereo = new Stereo(context); describe(stereo.connect.name, () => { /* eslint-disable dot-notation */ @@ -176,7 +135,13 @@ describe(Stereo.name, () => { }); describe(stereo.deactivate.name, () => { - test('should call `connect` method and stop `onaudioprocess` event handler', () => { + test('should call `connect` method', () => { + // HACK: + // eslint-disable-next-line dot-notation + if (stereo['processor'] === null) { + return; + } + const originalConnect = stereo.connect; // eslint-disable-next-line dot-notation @@ -192,10 +157,7 @@ describe(Stereo.name, () => { stereo.deactivate(); - expect(connectMock).toHaveBeenCalledTimes(2); - - // eslint-disable-next-line dot-notation - expect(stereo['processor'].onaudioprocess).toBe(null); + expect(connectMock).toHaveBeenCalledTimes(1); stereo.connect = originalConnect; diff --git a/test/SoundModule/Effectors/VocalCanceler.test.ts b/test/SoundModule/Effectors/VocalCanceler.test.ts index f7ed2a52f..187006b01 100644 --- a/test/SoundModule/Effectors/VocalCanceler.test.ts +++ b/test/SoundModule/Effectors/VocalCanceler.test.ts @@ -5,20 +5,7 @@ describe(VocalCanceler.name, () => { const context = new AudioContextMock(); // @ts-ignore - const vocalcanceler = new VocalCanceler(context, 2048); - - // eslint-disable-next-line dot-notation - describe(vocalcanceler['cancel'].name, () => { - test('should return difference between left and right channel', () => { - // eslint-disable-next-line dot-notation - expect(vocalcanceler['cancel'](1, 1)).toBeCloseTo(1, 1); - - vocalcanceler.param({ depth: 0.5 }); - - // eslint-disable-next-line dot-notation - expect(vocalcanceler['cancel'](1, 1)).toBeCloseTo(0.5, 1); - }); - }); + const vocalcanceler = new VocalCanceler(context); describe(vocalcanceler.param.name, () => { const defaultParams: VocalCancelerParams = { @@ -74,7 +61,13 @@ describe(VocalCanceler.name, () => { }); describe(vocalcanceler.deactivate.name, () => { - test('should call `connect` method and stop `onaudioprocess` event handler', () => { + test('should call `connect` method', () => { + // HACK: + // eslint-disable-next-line dot-notation + if (vocalcanceler['processor'] === null) { + return; + } + const originalConnect = vocalcanceler.connect; // eslint-disable-next-line dot-notation @@ -90,10 +83,7 @@ describe(VocalCanceler.name, () => { vocalcanceler.deactivate(); - expect(connectMock).toHaveBeenCalledTimes(2); - - // eslint-disable-next-line dot-notation - expect(vocalcanceler['processor'].onaudioprocess).toBe(null); + expect(connectMock).toHaveBeenCalledTimes(1); vocalcanceler.connect = originalConnect;