note * .octaves and ramps to note\n * over the duration of .pitchDecay.\n * @example\n * const synth = new Tone.MembraneSynth().toDestination();\n * synth.triggerAttackRelease("C2", "8n");\n * @category Instrument\n */\nclass MembraneSynth extends Synth_Synth {\n constructor() {\n super(Defaults_optionsFromArguments(MembraneSynth.getDefaults(), arguments));\n this.name = "MembraneSynth";\n /**\n * Portamento is ignored in this synth. use pitch decay instead.\n */\n this.portamento = 0;\n const options = Defaults_optionsFromArguments(MembraneSynth.getDefaults(), arguments);\n this.pitchDecay = options.pitchDecay;\n this.octaves = options.octaves;\n Interface_readOnly(this, ["oscillator", "envelope"]);\n }\n static getDefaults() {\n return Defaults_deepMerge(Monophonic_Monophonic.getDefaults(), Synth_Synth.getDefaults(), {\n envelope: {\n attack: 0.001,\n attackCurve: "exponential",\n decay: 0.4,\n release: 1.4,\n sustain: 0.01,\n },\n octaves: 10,\n oscillator: {\n type: "sine",\n },\n pitchDecay: 0.05,\n });\n }\n setNote(note, time) {\n const seconds = this.toSeconds(time);\n const hertz = this.toFrequency(note instanceof Frequency_FrequencyClass ? 
note.toFrequency() : note);\n const maxNote = hertz * this.octaves;\n this.oscillator.frequency.setValueAtTime(maxNote, seconds);\n this.oscillator.frequency.exponentialRampToValueAtTime(hertz, seconds + this.toSeconds(this.pitchDecay));\n return this;\n }\n dispose() {\n super.dispose();\n return this;\n }\n}\n__decorate([\n range(0)\n], MembraneSynth.prototype, "octaves", void 0);\n__decorate([\n timeRange(0)\n], MembraneSynth.prototype, "pitchDecay", void 0);\n//# sourceMappingURL=MembraneSynth.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/instrument/NoiseSynth.js\n\n\n\n\n\n\n\n/**\n * Tone.NoiseSynth is composed of [[Noise]] through an [[AmplitudeEnvelope]].\n * ```\n * +-------+ +-------------------+\n * | Noise +>--\x3e AmplitudeEnvelope +>--\x3e Output\n * +-------+ +-------------------+\n * ```\n * @example\n * const noiseSynth = new Tone.NoiseSynth().toDestination();\n * noiseSynth.triggerAttackRelease("8n", 0.05);\n * @category Instrument\n */\nclass NoiseSynth extends (/* unused pure expression or super */ null && (Instrument)) {\n constructor() {\n super(optionsFromArguments(NoiseSynth.getDefaults(), arguments));\n this.name = "NoiseSynth";\n const options = optionsFromArguments(NoiseSynth.getDefaults(), arguments);\n this.noise = new Noise(Object.assign({\n context: this.context,\n }, options.noise));\n this.envelope = new AmplitudeEnvelope(Object.assign({\n context: this.context,\n }, options.envelope));\n // connect the noise to the output\n this.noise.chain(this.envelope, this.output);\n }\n static getDefaults() {\n return Object.assign(Instrument.getDefaults(), {\n envelope: Object.assign(omitFromObject(Envelope.getDefaults(), Object.keys(ToneAudioNode.getDefaults())), {\n decay: 0.1,\n sustain: 0.0,\n }),\n noise: Object.assign(omitFromObject(Noise.getDefaults(), Object.keys(Source.getDefaults())), {\n type: "white",\n }),\n });\n }\n /**\n * Start the attack portion of the envelopes. 
Unlike other\n * instruments, Tone.NoiseSynth doesn\'t have a note.\n * @example\n * const noiseSynth = new Tone.NoiseSynth().toDestination();\n * noiseSynth.triggerAttack();\n */\n triggerAttack(time, velocity = 1) {\n time = this.toSeconds(time);\n // the envelopes\n this.envelope.triggerAttack(time, velocity);\n // start the noise\n this.noise.start(time);\n if (this.envelope.sustain === 0) {\n this.noise.stop(time + this.toSeconds(this.envelope.attack) + this.toSeconds(this.envelope.decay));\n }\n return this;\n }\n /**\n * Start the release portion of the envelopes.\n */\n triggerRelease(time) {\n time = this.toSeconds(time);\n this.envelope.triggerRelease(time);\n this.noise.stop(time + this.toSeconds(this.envelope.release));\n return this;\n }\n sync() {\n if (this._syncState()) {\n this._syncMethod("triggerAttack", 0);\n this._syncMethod("triggerRelease", 0);\n }\n return this;\n }\n triggerAttackRelease(duration, time, velocity = 1) {\n time = this.toSeconds(time);\n duration = this.toSeconds(duration);\n this.triggerAttack(time, velocity);\n this.triggerRelease(time + duration);\n return this;\n }\n dispose() {\n super.dispose();\n this.noise.dispose();\n this.envelope.dispose();\n return this;\n }\n}\n//# sourceMappingURL=NoiseSynth.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/core/worklet/WorkletGlobalScope.js\n/**\n * All of the classes or functions which are loaded into the AudioWorkletGlobalScope\n */\nconst workletContext = new Set();\n/**\n * Add a class to the AudioWorkletGlobalScope\n */\nfunction addToWorklet(classOrFunction) {\n workletContext.add(classOrFunction);\n}\n/**\n * Register a processor in the AudioWorkletGlobalScope with the given name\n */\nfunction registerProcessor(name, classDesc) {\n const processor = /* javascript */ `registerProcessor("${name}", ${classDesc})`;\n workletContext.add(processor);\n}\n/**\n * Get all of the modules which have been registered to the AudioWorkletGlobalScope\n */\nfunction 
WorkletGlobalScope_getWorkletGlobalScope() {\n return Array.from(workletContext).join("\\n");\n}\n//# sourceMappingURL=WorkletGlobalScope.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/core/worklet/ToneAudioWorklet.js\n\n\n\nclass ToneAudioWorklet_ToneAudioWorklet extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor(options) {\n super(options);\n this.name = "ToneAudioWorklet";\n /**\n * The constructor options for the node\n */\n this.workletOptions = {};\n /**\n * Callback which is invoked when there is an error in the processing\n */\n this.onprocessorerror = noOp;\n const blobUrl = URL.createObjectURL(new Blob([getWorkletGlobalScope()], { type: "text/javascript" }));\n const name = this._audioWorkletName();\n this._dummyGain = this.context.createGain();\n this._dummyParam = this._dummyGain.gain;\n // Register the processor\n this.context.addAudioWorkletModule(blobUrl, name).then(() => {\n // create the worklet when it\'s read\n if (!this.disposed) {\n this._worklet = this.context.createAudioWorkletNode(name, this.workletOptions);\n this._worklet.onprocessorerror = this.onprocessorerror.bind(this);\n this.onReady(this._worklet);\n }\n });\n }\n dispose() {\n super.dispose();\n this._dummyGain.disconnect();\n if (this._worklet) {\n this._worklet.port.postMessage("dispose");\n this._worklet.disconnect();\n }\n return this;\n }\n}\n//# sourceMappingURL=ToneAudioWorklet.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/core/worklet/ToneAudioWorkletProcessor.worklet.js\n\nconst toneAudioWorkletProcessor = /* javascript */ `\n\t/**\n\t * The base AudioWorkletProcessor for use in Tone.js. Works with the [[ToneAudioWorklet]]. \n\t */\n\tclass ToneAudioWorkletProcessor extends AudioWorkletProcessor {\n\n\t\tconstructor(options) {\n\t\t\t\n\t\t\tsuper(options);\n\t\t\t/**\n\t\t\t * If the processor was disposed or not. 
Keep alive until it\'s disposed.\n\t\t\t */\n\t\t\tthis.disposed = false;\n\t\t \t/** \n\t\t\t * The number of samples in the processing block\n\t\t\t */\n\t\t\tthis.blockSize = 128;\n\t\t\t/**\n\t\t\t * the sample rate\n\t\t\t */\n\t\t\tthis.sampleRate = sampleRate;\n\n\t\t\tthis.port.onmessage = (event) => {\n\t\t\t\t// when it receives a dispose \n\t\t\t\tif (event.data === "dispose") {\n\t\t\t\t\tthis.disposed = true;\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t}\n`;\naddToWorklet(toneAudioWorkletProcessor);\n//# sourceMappingURL=ToneAudioWorkletProcessor.worklet.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/core/worklet/SingleIOProcessor.worklet.js\n\n\nconst singleIOProcess = /* javascript */ `\n\t/**\n\t * Abstract class for a single input/output processor. \n\t * has a \'generate\' function which processes one sample at a time\n\t */\n\tclass SingleIOProcessor extends ToneAudioWorkletProcessor {\n\n\t\tconstructor(options) {\n\t\t\tsuper(Object.assign(options, {\n\t\t\t\tnumberOfInputs: 1,\n\t\t\t\tnumberOfOutputs: 1\n\t\t\t}));\n\t\t\t/**\n\t\t\t * Holds the name of the parameter and a single value of that\n\t\t\t * parameter at the current sample\n\t\t\t * @type { [name: string]: number }\n\t\t\t */\n\t\t\tthis.params = {}\n\t\t}\n\n\t\t/**\n\t\t * Generate an output sample from the input sample and parameters\n\t\t * @abstract\n\t\t * @param input number\n\t\t * @param channel number\n\t\t * @param parameters { [name: string]: number }\n\t\t * @returns number\n\t\t */\n\t\tgenerate(){}\n\n\t\t/**\n\t\t * Update the private params object with the \n\t\t * values of the parameters at the given index\n\t\t * @param parameters { [name: string]: Float32Array },\n\t\t * @param index number\n\t\t */\n\t\tupdateParams(parameters, index) {\n\t\t\tfor (const paramName in parameters) {\n\t\t\t\tconst param = parameters[paramName];\n\t\t\t\tif (param.length > 1) {\n\t\t\t\t\tthis.params[paramName] = parameters[paramName][index];\n\t\t\t\t} else 
{\n\t\t\t\t\tthis.params[paramName] = parameters[paramName][0];\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t/**\n\t\t * Process a single frame of the audio\n\t\t * @param inputs Float32Array[][]\n\t\t * @param outputs Float32Array[][]\n\t\t */\n\t\tprocess(inputs, outputs, parameters) {\n\t\t\tconst input = inputs[0];\n\t\t\tconst output = outputs[0];\n\t\t\t// get the parameter values\n\t\t\tconst channelCount = Math.max(input && input.length || 0, output.length);\n\t\t\tfor (let sample = 0; sample < this.blockSize; sample++) {\n\t\t\t\tthis.updateParams(parameters, sample);\n\t\t\t\tfor (let channel = 0; channel < channelCount; channel++) {\n\t\t\t\t\tconst inputSample = input && input.length ? input[channel][sample] : 0;\n\t\t\t\t\toutput[channel][sample] = this.generate(inputSample, channel, this.params);\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn !this.disposed;\n\t\t}\n\t};\n`;\naddToWorklet(singleIOProcess);\n//# sourceMappingURL=SingleIOProcessor.worklet.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/core/worklet/DelayLine.worklet.js\n\nconst delayLine = /* javascript */ `\n\t/**\n\t * A multichannel buffer for use within an AudioWorkletProcessor as a delay line\n\t */\n\tclass DelayLine {\n\t\t\n\t\tconstructor(size, channels) {\n\t\t\tthis.buffer = [];\n\t\t\tthis.writeHead = []\n\t\t\tthis.size = size;\n\n\t\t\t// create the empty channels\n\t\t\tfor (let i = 0; i < channels; i++) {\n\t\t\t\tthis.buffer[i] = new Float32Array(this.size);\n\t\t\t\tthis.writeHead[i] = 0;\n\t\t\t}\n\t\t}\n\n\t\t/**\n\t\t * Push a value onto the end\n\t\t * @param channel number\n\t\t * @param value number\n\t\t */\n\t\tpush(channel, value) {\n\t\t\tthis.writeHead[channel] += 1;\n\t\t\tif (this.writeHead[channel] > this.size) {\n\t\t\t\tthis.writeHead[channel] = 0;\n\t\t\t}\n\t\t\tthis.buffer[channel][this.writeHead[channel]] = value;\n\t\t}\n\n\t\t/**\n\t\t * Get the recorded value of the channel given the delay\n\t\t * @param channel number\n\t\t * @param delay number delay 
samples\n\t\t */\n\t\tget(channel, delay) {\n\t\t\tlet readHead = this.writeHead[channel] - Math.floor(delay);\n\t\t\tif (readHead < 0) {\n\t\t\t\treadHead += this.size;\n\t\t\t}\n\t\t\treturn this.buffer[channel][readHead];\n\t\t}\n\t}\n`;\naddToWorklet(delayLine);\n//# sourceMappingURL=DelayLine.worklet.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/filter/FeedbackCombFilter.worklet.js\n\n\n\nconst FeedbackCombFilter_worklet_workletName = "feedback-comb-filter";\nconst feedbackCombFilter = /* javascript */ `\n\tclass FeedbackCombFilterWorklet extends SingleIOProcessor {\n\n\t\tconstructor(options) {\n\t\t\tsuper(options);\n\t\t\tthis.delayLine = new DelayLine(this.sampleRate, options.channelCount || 2);\n\t\t}\n\n\t\tstatic get parameterDescriptors() {\n\t\t\treturn [{\n\t\t\t\tname: "delayTime",\n\t\t\t\tdefaultValue: 0.1,\n\t\t\t\tminValue: 0,\n\t\t\t\tmaxValue: 1,\n\t\t\t\tautomationRate: "k-rate"\n\t\t\t}, {\n\t\t\t\tname: "feedback",\n\t\t\t\tdefaultValue: 0.5,\n\t\t\t\tminValue: 0,\n\t\t\t\tmaxValue: 0.9999,\n\t\t\t\tautomationRate: "k-rate"\n\t\t\t}];\n\t\t}\n\n\t\tgenerate(input, channel, parameters) {\n\t\t\tconst delayedSample = this.delayLine.get(channel, parameters.delayTime * this.sampleRate);\n\t\t\tthis.delayLine.push(channel, input + delayedSample * parameters.feedback);\n\t\t\treturn delayedSample;\n\t\t}\n\t}\n`;\nregisterProcessor(FeedbackCombFilter_worklet_workletName, feedbackCombFilter);\n//# sourceMappingURL=FeedbackCombFilter.worklet.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/filter/FeedbackCombFilter.js\n\n\n\n\n\n\n\n/**\n * Comb filters are basic building blocks for physical modeling. Read more\n * about comb filters on [CCRMA\'s website](https://ccrma.stanford.edu/~jos/pasp/Feedback_Comb_Filters.html).\n *\n * This comb filter is implemented with the AudioWorkletNode which allows it to have feedback delays less than the\n * Web Audio processing block of 128 samples. 
There is a polyfill for browsers that don\'t yet support the\n * AudioWorkletNode, but it will add some latency and have slower performance than the AudioWorkletNode.\n * @category Component\n */\nclass FeedbackCombFilter_FeedbackCombFilter extends (/* unused pure expression or super */ null && (ToneAudioWorklet)) {\n constructor() {\n super(optionsFromArguments(FeedbackCombFilter_FeedbackCombFilter.getDefaults(), arguments, ["delayTime", "resonance"]));\n this.name = "FeedbackCombFilter";\n const options = optionsFromArguments(FeedbackCombFilter_FeedbackCombFilter.getDefaults(), arguments, ["delayTime", "resonance"]);\n this.input = new Gain({ context: this.context });\n this.output = new Gain({ context: this.context });\n this.delayTime = new Param({\n context: this.context,\n value: options.delayTime,\n units: "time",\n minValue: 0,\n maxValue: 1,\n param: this._dummyParam,\n swappable: true,\n });\n this.resonance = new Param({\n context: this.context,\n value: options.resonance,\n units: "normalRange",\n param: this._dummyParam,\n swappable: true,\n });\n readOnly(this, ["resonance", "delayTime"]);\n }\n _audioWorkletName() {\n return workletName;\n }\n /**\n * The default parameters\n */\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n delayTime: 0.1,\n resonance: 0.5,\n });\n }\n onReady(node) {\n connectSeries(this.input, node, this.output);\n const delayTime = node.parameters.get("delayTime");\n ;\n this.delayTime.setParam(delayTime);\n const feedback = node.parameters.get("feedback");\n ;\n this.resonance.setParam(feedback);\n }\n dispose() {\n super.dispose();\n this.input.dispose();\n this.output.dispose();\n this.delayTime.dispose();\n this.resonance.dispose();\n return this;\n }\n}\n//# sourceMappingURL=FeedbackCombFilter.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/filter/OnePoleFilter.js\n\n\n\n/**\n * A one pole filter with 6db-per-octave rolloff. 
Either "highpass" or "lowpass".\n * Note that changing the type or frequency may result in a discontinuity which\n * can sound like a click or pop.\n * References:\n * * http://www.earlevel.com/main/2012/12/15/a-one-pole-filter/\n * * http://www.dspguide.com/ch19/2.htm\n * * https://github.com/vitaliy-bobrov/js-rocks/blob/master/src/app/audio/effects/one-pole-filters.ts\n * @category Component\n */\nclass OnePoleFilter_OnePoleFilter extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor() {\n super(optionsFromArguments(OnePoleFilter_OnePoleFilter.getDefaults(), arguments, ["frequency", "type"]));\n this.name = "OnePoleFilter";\n const options = optionsFromArguments(OnePoleFilter_OnePoleFilter.getDefaults(), arguments, ["frequency", "type"]);\n this._frequency = options.frequency;\n this._type = options.type;\n this.input = new Gain({ context: this.context });\n this.output = new Gain({ context: this.context });\n this._createFilter();\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n frequency: 880,\n type: "lowpass"\n });\n }\n /**\n * Create a filter and dispose the old one\n */\n _createFilter() {\n const oldFilter = this._filter;\n const freq = this.toFrequency(this._frequency);\n const t = 1 / (2 * Math.PI * freq);\n if (this._type === "lowpass") {\n const a0 = 1 / (t * this.context.sampleRate);\n const b1 = a0 - 1;\n this._filter = this.context.createIIRFilter([a0, 0], [1, b1]);\n }\n else {\n const b1 = 1 / (t * this.context.sampleRate) - 1;\n this._filter = this.context.createIIRFilter([1, -1], [1, b1]);\n }\n this.input.chain(this._filter, this.output);\n if (oldFilter) {\n // dispose it on the next block\n this.context.setTimeout(() => {\n if (!this.disposed) {\n this.input.disconnect(oldFilter);\n oldFilter.disconnect();\n }\n }, this.blockTime);\n }\n }\n /**\n * The frequency value.\n */\n get frequency() {\n return this._frequency;\n }\n set frequency(fq) {\n this._frequency = fq;\n 
this._createFilter();\n }\n /**\n * The OnePole Filter type, either "highpass" or "lowpass"\n */\n get type() {\n return this._type;\n }\n set type(t) {\n this._type = t;\n this._createFilter();\n }\n /**\n * Get the frequency response curve. This curve represents how the filter\n * responses to frequencies between 20hz-20khz.\n * @param len The number of values to return\n * @return The frequency response curve between 20-20kHz\n */\n getFrequencyResponse(len = 128) {\n const freqValues = new Float32Array(len);\n for (let i = 0; i < len; i++) {\n const norm = Math.pow(i / len, 2);\n const freq = norm * (20000 - 20) + 20;\n freqValues[i] = freq;\n }\n const magValues = new Float32Array(len);\n const phaseValues = new Float32Array(len);\n this._filter.getFrequencyResponse(freqValues, magValues, phaseValues);\n return magValues;\n }\n dispose() {\n super.dispose();\n this.input.dispose();\n this.output.dispose();\n this._filter.disconnect();\n return this;\n }\n}\n//# sourceMappingURL=OnePoleFilter.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/filter/LowpassCombFilter.js\n\n\n\n\n/**\n * A lowpass feedback comb filter. 
It is similar to\n * [[FeedbackCombFilter]], but includes a lowpass filter.\n * @category Component\n */\nclass LowpassCombFilter_LowpassCombFilter extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor() {\n super(optionsFromArguments(LowpassCombFilter_LowpassCombFilter.getDefaults(), arguments, ["delayTime", "resonance", "dampening"]));\n this.name = "LowpassCombFilter";\n const options = optionsFromArguments(LowpassCombFilter_LowpassCombFilter.getDefaults(), arguments, ["delayTime", "resonance", "dampening"]);\n this._combFilter = this.output = new FeedbackCombFilter({\n context: this.context,\n delayTime: options.delayTime,\n resonance: options.resonance,\n });\n this.delayTime = this._combFilter.delayTime;\n this.resonance = this._combFilter.resonance;\n this._lowpass = this.input = new OnePoleFilter({\n context: this.context,\n frequency: options.dampening,\n type: "lowpass",\n });\n // connections\n this._lowpass.connect(this._combFilter);\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n dampening: 3000,\n delayTime: 0.1,\n resonance: 0.5,\n });\n }\n /**\n * The dampening control of the feedback\n */\n get dampening() {\n return this._lowpass.frequency;\n }\n set dampening(fq) {\n this._lowpass.frequency = fq;\n }\n dispose() {\n super.dispose();\n this._combFilter.dispose();\n this._lowpass.dispose();\n return this;\n }\n}\n//# sourceMappingURL=LowpassCombFilter.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/instrument/PluckSynth.js\n\n\n\n\n\n/**\n * Karplus-String string synthesis.\n * @example\n * const plucky = new Tone.PluckSynth().toDestination();\n * plucky.triggerAttack("C4", "+0.5");\n * plucky.triggerAttack("C3", "+1");\n * plucky.triggerAttack("C2", "+1.5");\n * plucky.triggerAttack("C1", "+2");\n * @category Instrument\n */\nclass PluckSynth extends (/* unused pure expression or super */ null && (Instrument)) {\n constructor() {\n 
super(optionsFromArguments(PluckSynth.getDefaults(), arguments));\n this.name = "PluckSynth";\n const options = optionsFromArguments(PluckSynth.getDefaults(), arguments);\n this._noise = new Noise({\n context: this.context,\n type: "pink"\n });\n this.attackNoise = options.attackNoise;\n this._lfcf = new LowpassCombFilter({\n context: this.context,\n dampening: options.dampening,\n resonance: options.resonance,\n });\n this.resonance = options.resonance;\n this.release = options.release;\n this._noise.connect(this._lfcf);\n this._lfcf.connect(this.output);\n }\n static getDefaults() {\n return deepMerge(Instrument.getDefaults(), {\n attackNoise: 1,\n dampening: 4000,\n resonance: 0.7,\n release: 1,\n });\n }\n /**\n * The dampening control. i.e. the lowpass filter frequency of the comb filter\n * @min 0\n * @max 7000\n */\n get dampening() {\n return this._lfcf.dampening;\n }\n set dampening(fq) {\n this._lfcf.dampening = fq;\n }\n triggerAttack(note, time) {\n const freq = this.toFrequency(note);\n time = this.toSeconds(time);\n const delayAmount = 1 / freq;\n this._lfcf.delayTime.setValueAtTime(delayAmount, time);\n this._noise.start(time);\n this._noise.stop(time + delayAmount * this.attackNoise);\n this._lfcf.resonance.cancelScheduledValues(time);\n this._lfcf.resonance.setValueAtTime(this.resonance, time);\n return this;\n }\n /**\n * Ramp down the [[resonance]] to 0 over the duration of the release time.\n */\n triggerRelease(time) {\n this._lfcf.resonance.linearRampTo(0, this.release, time);\n return this;\n }\n dispose() {\n super.dispose();\n this._noise.dispose();\n this._lfcf.dispose();\n return this;\n }\n}\n//# sourceMappingURL=PluckSynth.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/instrument/PolySynth.js\n\n\n\n\n\n\n/**\n * PolySynth handles voice creation and allocation for any\n * instruments passed in as the second paramter. 
PolySynth is\n * not a synthesizer by itself, it merely manages voices of\n * one of the other types of synths, allowing any of the\n * monophonic synthesizers to be polyphonic.\n *\n * @example\n * const synth = new Tone.PolySynth().toDestination();\n * // set the attributes across all the voices using \'set\'\n * synth.set({ detune: -1200 });\n * // play a chord\n * synth.triggerAttackRelease(["C4", "E4", "A4"], 1);\n * @category Instrument\n */\nclass PolySynth extends (/* unused pure expression or super */ null && (Instrument)) {\n constructor() {\n super(optionsFromArguments(PolySynth.getDefaults(), arguments, ["voice", "options"]));\n this.name = "PolySynth";\n /**\n * The voices which are not currently in use\n */\n this._availableVoices = [];\n /**\n * The currently active voices\n */\n this._activeVoices = [];\n /**\n * All of the allocated voices for this synth.\n */\n this._voices = [];\n /**\n * The GC timeout. Held so that it could be cancelled when the node is disposed.\n */\n this._gcTimeout = -1;\n /**\n * A moving average of the number of active voices\n */\n this._averageActiveVoices = 0;\n const options = optionsFromArguments(PolySynth.getDefaults(), arguments, ["voice", "options"]);\n // check against the old API (pre 14.3.0)\n assert(!isNumber(options.voice), "DEPRECATED: The polyphony count is no longer the first argument.");\n const defaults = options.voice.getDefaults();\n this.options = Object.assign(defaults, options.options);\n this.voice = options.voice;\n this.maxPolyphony = options.maxPolyphony;\n // create the first voice\n this._dummyVoice = this._getNextAvailableVoice();\n // remove it from the voices list\n const index = this._voices.indexOf(this._dummyVoice);\n this._voices.splice(index, 1);\n // kick off the GC interval\n this._gcTimeout = this.context.setInterval(this._collectGarbage.bind(this), 1);\n }\n static getDefaults() {\n return Object.assign(Instrument.getDefaults(), {\n maxPolyphony: 32,\n options: {},\n voice: 
Synth,\n });\n }\n /**\n * The number of active voices.\n */\n get activeVoices() {\n return this._activeVoices.length;\n }\n /**\n * Invoked when the source is done making sound, so that it can be\n * readded to the pool of available voices\n */\n _makeVoiceAvailable(voice) {\n this._availableVoices.push(voice);\n // remove the midi note from \'active voices\'\n const activeVoiceIndex = this._activeVoices.findIndex((e) => e.voice === voice);\n this._activeVoices.splice(activeVoiceIndex, 1);\n }\n /**\n * Get an available voice from the pool of available voices.\n * If one is not available and the maxPolyphony limit is reached,\n * steal a voice, otherwise return null.\n */\n _getNextAvailableVoice() {\n // if there are available voices, return the first one\n if (this._availableVoices.length) {\n return this._availableVoices.shift();\n }\n else if (this._voices.length < this.maxPolyphony) {\n // otherwise if there is still more maxPolyphony, make a new voice\n const voice = new this.voice(Object.assign(this.options, {\n context: this.context,\n onsilence: this._makeVoiceAvailable.bind(this),\n }));\n voice.connect(this.output);\n this._voices.push(voice);\n return voice;\n }\n else {\n warn("Max polyphony exceeded. 
Note dropped.");\n }\n }\n /**\n * Occasionally check if there are any allocated voices which can be cleaned up.\n */\n _collectGarbage() {\n this._averageActiveVoices = Math.max(this._averageActiveVoices * 0.95, this.activeVoices);\n if (this._availableVoices.length && this._voices.length > Math.ceil(this._averageActiveVoices + 1)) {\n // take off an available note\n const firstAvail = this._availableVoices.shift();\n const index = this._voices.indexOf(firstAvail);\n this._voices.splice(index, 1);\n if (!this.context.isOffline) {\n firstAvail.dispose();\n }\n }\n }\n /**\n * Internal method which triggers the attack\n */\n _triggerAttack(notes, time, velocity) {\n notes.forEach(note => {\n const midiNote = new MidiClass(this.context, note).toMidi();\n const voice = this._getNextAvailableVoice();\n if (voice) {\n voice.triggerAttack(note, time, velocity);\n this._activeVoices.push({\n midi: midiNote, voice, released: false,\n });\n this.log("triggerAttack", note, time);\n }\n });\n }\n /**\n * Internal method which triggers the release\n */\n _triggerRelease(notes, time) {\n notes.forEach(note => {\n const midiNote = new MidiClass(this.context, note).toMidi();\n const event = this._activeVoices.find(({ midi, released }) => midi === midiNote && !released);\n if (event) {\n // trigger release on that note\n event.voice.triggerRelease(time);\n // mark it as released\n event.released = true;\n this.log("triggerRelease", note, time);\n }\n });\n }\n /**\n * Schedule the attack/release events. 
If the time is in the future, then it should set a timeout\n * to wait for just-in-time scheduling\n */\n _scheduleEvent(type, notes, time, velocity) {\n assert(!this.disposed, "Synth was already disposed");\n // if the notes are greater than this amount of time in the future, they should be scheduled with setTimeout\n if (time <= this.now()) {\n // do it immediately\n if (type === "attack") {\n this._triggerAttack(notes, time, velocity);\n }\n else {\n this._triggerRelease(notes, time);\n }\n }\n else {\n // schedule it to start in the future\n this.context.setTimeout(() => {\n this._scheduleEvent(type, notes, time, velocity);\n }, time - this.now());\n }\n }\n /**\n * Trigger the attack portion of the note\n * @param notes The notes to play. Accepts a single Frequency or an array of frequencies.\n * @param time The start time of the note.\n * @param velocity The velocity of the note.\n * @example\n * const synth = new Tone.PolySynth(Tone.FMSynth).toDestination();\n * // trigger a chord immediately with a velocity of 0.2\n * synth.triggerAttack(["Ab3", "C4", "F5"], Tone.now(), 0.2);\n */\n triggerAttack(notes, time, velocity) {\n if (!Array.isArray(notes)) {\n notes = [notes];\n }\n const computedTime = this.toSeconds(time);\n this._scheduleEvent("attack", notes, computedTime, velocity);\n return this;\n }\n /**\n * Trigger the release of the note. Unlike monophonic instruments,\n * a note (or array of notes) needs to be passed in as the first argument.\n * @param notes The notes to play. 
Accepts a single Frequency or an array of frequencies.\n * @param time When the release will be triggered.\n * @example\n * @example\n * const poly = new Tone.PolySynth(Tone.AMSynth).toDestination();\n * poly.triggerAttack(["Ab3", "C4", "F5"]);\n * // trigger the release of the given notes.\n * poly.triggerRelease(["Ab3", "C4"], "+1");\n * poly.triggerRelease("F5", "+3");\n */\n triggerRelease(notes, time) {\n if (!Array.isArray(notes)) {\n notes = [notes];\n }\n const computedTime = this.toSeconds(time);\n this._scheduleEvent("release", notes, computedTime);\n return this;\n }\n /**\n * Trigger the attack and release after the specified duration\n * @param notes The notes to play. Accepts a single Frequency or an array of frequencies.\n * @param duration the duration of the note\n * @param time if no time is given, defaults to now\n * @param velocity the velocity of the attack (0-1)\n * @example\n * const poly = new Tone.PolySynth(Tone.AMSynth).toDestination();\n * // can pass in an array of durations as well\n * poly.triggerAttackRelease(["Eb3", "G4", "Bb4", "D5"], [4, 3, 2, 1]);\n */\n triggerAttackRelease(notes, duration, time, velocity) {\n const computedTime = this.toSeconds(time);\n this.triggerAttack(notes, computedTime, velocity);\n if (isArray(duration)) {\n assert(isArray(notes), "If the duration is an array, the notes must also be an array");\n notes = notes;\n for (let i = 0; i < notes.length; i++) {\n const d = duration[Math.min(i, duration.length - 1)];\n const durationSeconds = this.toSeconds(d);\n assert(durationSeconds > 0, "The duration must be greater than 0");\n this.triggerRelease(notes[i], computedTime + durationSeconds);\n }\n }\n else {\n const durationSeconds = this.toSeconds(duration);\n assert(durationSeconds > 0, "The duration must be greater than 0");\n this.triggerRelease(notes, computedTime + durationSeconds);\n }\n return this;\n }\n sync() {\n if (this._syncState()) {\n this._syncMethod("triggerAttack", 1);\n 
this._syncMethod("triggerRelease", 1);\n }\n return this;\n }\n /**\n * Set a member/attribute of the voices\n * @example\n * const poly = new Tone.PolySynth().toDestination();\n * // set all of the voices using an options object for the synth type\n * poly.set({\n * \tenvelope: {\n * \t\tattack: 0.25\n * \t}\n * });\n * poly.triggerAttackRelease("Bb3", 0.2);\n */\n set(options) {\n // remove options which are controlled by the PolySynth\n const sanitizedOptions = omitFromObject(options, ["onsilence", "context"]);\n // store all of the options\n this.options = deepMerge(this.options, sanitizedOptions);\n this._voices.forEach(voice => voice.set(sanitizedOptions));\n this._dummyVoice.set(sanitizedOptions);\n return this;\n }\n get() {\n return this._dummyVoice.get();\n }\n /**\n * Trigger the release portion of all the currently active voices immediately.\n * Useful for silencing the synth.\n */\n releaseAll(time) {\n const computedTime = this.toSeconds(time);\n this._activeVoices.forEach(({ voice }) => {\n voice.triggerRelease(computedTime);\n });\n return this;\n }\n dispose() {\n super.dispose();\n this._dummyVoice.dispose();\n this._voices.forEach(v => v.dispose());\n this._activeVoices = [];\n this._availableVoices = [];\n this.context.clearInterval(this._gcTimeout);\n return this;\n }\n}\n//# sourceMappingURL=PolySynth.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/instrument/Sampler.js\n\n\n\n\n\n\n\n\n\n\n\n/**\n * Pass in an object which maps the note\'s pitch or midi value to the url,\n * then you can trigger the attack and release of that note like other instruments.\n * By automatically repitching the samples, it is possible to play pitches which\n * were not explicitly included which can save loading time.\n *\n * For sample or buffer playback where repitching is not necessary,\n * use [[Player]].\n * @example\n * const sampler = new Tone.Sampler({\n * \turls: {\n * \t\tA1: "A1.mp3",\n * \t\tA2: "A2.mp3",\n * \t},\n * \tbaseUrl: 
"https://tonejs.github.io/audio/casio/",\n * \tonload: () => {\n * \t\tsampler.triggerAttackRelease(["C1", "E1", "G1", "B1"], 0.5);\n * \t}\n * }).toDestination();\n * @category Instrument\n */\nclass Sampler extends Instrument_Instrument {\n constructor() {\n super(Defaults_optionsFromArguments(Sampler.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls"));\n this.name = "Sampler";\n /**\n * The object of all currently playing BufferSources\n */\n this._activeSources = new Map();\n const options = Defaults_optionsFromArguments(Sampler.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls");\n const urlMap = {};\n Object.keys(options.urls).forEach((note) => {\n const noteNumber = parseInt(note, 10);\n Debug_assert(isNote(note)\n || (TypeCheck_isNumber(noteNumber) && isFinite(noteNumber)), `url key is neither a note or midi pitch: ${note}`);\n if (isNote(note)) {\n // convert the note name to MIDI\n const mid = new Frequency_FrequencyClass(this.context, note).toMidi();\n urlMap[mid] = options.urls[note];\n }\n else if (TypeCheck_isNumber(noteNumber) && isFinite(noteNumber)) {\n // otherwise if it\'s numbers assume it\'s midi\n urlMap[noteNumber] = options.urls[noteNumber];\n }\n });\n this._buffers = new ToneAudioBuffers_ToneAudioBuffers({\n urls: urlMap,\n onload: options.onload,\n baseUrl: options.baseUrl,\n onerror: options.onerror,\n });\n this.attack = options.attack;\n this.release = options.release;\n this.curve = options.curve;\n // invoke the callback if it\'s already loaded\n if (this._buffers.loaded) {\n // invoke onload deferred\n Promise.resolve().then(options.onload);\n }\n }\n static getDefaults() {\n return Object.assign(Instrument_Instrument.getDefaults(), {\n attack: 0,\n baseUrl: "",\n curve: "exponential",\n onload: Interface_noOp,\n onerror: Interface_noOp,\n release: 0.1,\n urls: {},\n });\n }\n /**\n * Returns the difference in steps between the given midi note at the closets sample.\n */\n _findClosest(midi) {\n // 
searches within 8 octaves of the given midi note\n const MAX_INTERVAL = 96;\n let interval = 0;\n while (interval < MAX_INTERVAL) {\n // check above and below\n if (this._buffers.has(midi + interval)) {\n return -interval;\n }\n else if (this._buffers.has(midi - interval)) {\n return interval;\n }\n interval++;\n }\n throw new Error(`No available buffers for note: ${midi}`);\n }\n /**\n * @param notes\tThe note to play, or an array of notes.\n * @param time When to play the note\n * @param velocity The velocity to play the sample back.\n */\n triggerAttack(notes, time, velocity = 1) {\n this.log("triggerAttack", notes, time, velocity);\n if (!Array.isArray(notes)) {\n notes = [notes];\n }\n notes.forEach(note => {\n const midiFloat = ftomf(new Frequency_FrequencyClass(this.context, note).toFrequency());\n const midi = Math.round(midiFloat);\n const remainder = midiFloat - midi;\n // find the closest note pitch\n const difference = this._findClosest(midi);\n const closestNote = midi - difference;\n const buffer = this._buffers.get(closestNote);\n const playbackRate = Conversions_intervalToFrequencyRatio(difference + remainder);\n // play that note\n const source = new ToneBufferSource_ToneBufferSource({\n url: buffer,\n context: this.context,\n curve: this.curve,\n fadeIn: this.attack,\n fadeOut: this.release,\n playbackRate,\n }).connect(this.output);\n source.start(time, 0, buffer.duration / playbackRate, velocity);\n // add it to the active sources\n if (!TypeCheck_isArray(this._activeSources.get(midi))) {\n this._activeSources.set(midi, []);\n }\n this._activeSources.get(midi).push(source);\n // remove it when it\'s done\n source.onended = () => {\n if (this._activeSources && this._activeSources.has(midi)) {\n const sources = this._activeSources.get(midi);\n const index = sources.indexOf(source);\n if (index !== -1) {\n sources.splice(index, 1);\n }\n }\n };\n });\n return this;\n }\n /**\n * @param notes\tThe note to release, or an array of notes.\n * @param 
time \tWhen to release the note.\n */\n triggerRelease(notes, time) {\n this.log("triggerRelease", notes, time);\n if (!Array.isArray(notes)) {\n notes = [notes];\n }\n notes.forEach(note => {\n const midi = new Frequency_FrequencyClass(this.context, note).toMidi();\n // find the note\n if (this._activeSources.has(midi) && this._activeSources.get(midi).length) {\n const sources = this._activeSources.get(midi);\n time = this.toSeconds(time);\n sources.forEach(source => {\n source.stop(time);\n });\n this._activeSources.set(midi, []);\n }\n });\n return this;\n }\n /**\n * Release all currently active notes.\n * @param time \tWhen to release the notes.\n */\n releaseAll(time) {\n const computedTime = this.toSeconds(time);\n this._activeSources.forEach(sources => {\n while (sources.length) {\n const source = sources.shift();\n source.stop(computedTime);\n }\n });\n return this;\n }\n sync() {\n if (this._syncState()) {\n this._syncMethod("triggerAttack", 1);\n this._syncMethod("triggerRelease", 1);\n }\n return this;\n }\n /**\n * Invoke the attack phase, then after the duration, invoke the release.\n * @param notes\tThe note to play and release, or an array of notes.\n * @param duration The time the note should be held\n * @param time When to start the attack\n * @param velocity The velocity of the attack\n */\n triggerAttackRelease(notes, duration, time, velocity = 1) {\n const computedTime = this.toSeconds(time);\n this.triggerAttack(notes, computedTime, velocity);\n if (TypeCheck_isArray(duration)) {\n Debug_assert(TypeCheck_isArray(notes), "notes must be an array when duration is array");\n notes.forEach((note, index) => {\n const d = duration[Math.min(index, duration.length - 1)];\n this.triggerRelease(note, computedTime + this.toSeconds(d));\n });\n }\n else {\n this.triggerRelease(notes, computedTime + this.toSeconds(duration));\n }\n return this;\n }\n /**\n * Add a note to the sampler.\n * @param note The buffer\'s pitch.\n * @param url Either the url of the 
buffer, or a buffer which will be added with the given name.\n * @param callback The callback to invoke when the url is loaded.\n */\n add(note, url, callback) {\n Debug_assert(isNote(note) || isFinite(note), `note must be a pitch or midi: ${note}`);\n if (isNote(note)) {\n // convert the note name to MIDI\n const mid = new Frequency_FrequencyClass(this.context, note).toMidi();\n this._buffers.add(mid, url, callback);\n }\n else {\n // otherwise if it\'s numbers assume it\'s midi\n this._buffers.add(note, url, callback);\n }\n return this;\n }\n /**\n * If the buffers are loaded or not\n */\n get loaded() {\n return this._buffers.loaded;\n }\n /**\n * Clean up\n */\n dispose() {\n super.dispose();\n this._buffers.dispose();\n this._activeSources.forEach(sources => {\n sources.forEach(source => source.dispose());\n });\n this._activeSources.clear();\n return this;\n }\n}\n__decorate([\n timeRange(0)\n], Sampler.prototype, "attack", void 0);\n__decorate([\n timeRange(0)\n], Sampler.prototype, "release", void 0);\n//# sourceMappingURL=Sampler.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/instrument/index.js\n\n\n\n\n\n\n\n\n\n\n\n//# sourceMappingURL=index.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/ToneEvent.js\n\n\n\n\n\n\n\n/**\n * ToneEvent abstracts away this.context.transport.schedule and provides a schedulable\n * callback for a single or repeatable events along the timeline.\n *\n * @example\n * const synth = new Tone.PolySynth().toDestination();\n * const chordEvent = new Tone.ToneEvent(((time, chord) => {\n * \t// the chord as well as the exact time of the event\n * \t// are passed in as arguments to the callback function\n * \tsynth.triggerAttackRelease(chord, 0.5, time);\n * }), ["D4", "E4", "F4"]);\n * // start the chord at the beginning of the transport timeline\n * chordEvent.start();\n * // loop it every measure for 8 measures\n * chordEvent.loop = 8;\n * chordEvent.loopEnd = "1m";\n * @category Event\n 
*/\nclass ToneEvent_ToneEvent extends (/* unused pure expression or super */ null && (ToneWithContext)) {\n constructor() {\n super(optionsFromArguments(ToneEvent_ToneEvent.getDefaults(), arguments, ["callback", "value"]));\n this.name = "ToneEvent";\n /**\n * Tracks the scheduled events\n */\n this._state = new StateTimeline("stopped");\n /**\n * A delay time from when the event is scheduled to start\n */\n this._startOffset = 0;\n const options = optionsFromArguments(ToneEvent_ToneEvent.getDefaults(), arguments, ["callback", "value"]);\n this._loop = options.loop;\n this.callback = options.callback;\n this.value = options.value;\n this._loopStart = this.toTicks(options.loopStart);\n this._loopEnd = this.toTicks(options.loopEnd);\n this._playbackRate = options.playbackRate;\n this._probability = options.probability;\n this._humanize = options.humanize;\n this.mute = options.mute;\n this._playbackRate = options.playbackRate;\n this._state.increasing = true;\n // schedule the events for the first time\n this._rescheduleEvents();\n }\n static getDefaults() {\n return Object.assign(ToneWithContext.getDefaults(), {\n callback: noOp,\n humanize: false,\n loop: false,\n loopEnd: "1m",\n loopStart: 0,\n mute: false,\n playbackRate: 1,\n probability: 1,\n value: null,\n });\n }\n /**\n * Reschedule all of the events along the timeline\n * with the updated values.\n * @param after Only reschedules events after the given time.\n */\n _rescheduleEvents(after = -1) {\n // if no argument is given, schedules all of the events\n this._state.forEachFrom(after, event => {\n let duration;\n if (event.state === "started") {\n if (event.id !== -1) {\n this.context.transport.clear(event.id);\n }\n const startTick = event.time + Math.round(this.startOffset / this._playbackRate);\n if (this._loop === true || isNumber(this._loop) && this._loop > 1) {\n duration = Infinity;\n if (isNumber(this._loop)) {\n duration = (this._loop) * this._getLoopDuration();\n }\n const nextEvent = 
this._state.getAfter(startTick);\n if (nextEvent !== null) {\n duration = Math.min(duration, nextEvent.time - startTick);\n }\n if (duration !== Infinity) {\n // schedule a stop since it\'s finite duration\n this._state.setStateAtTime("stopped", startTick + duration + 1, { id: -1 });\n duration = new TicksClass(this.context, duration);\n }\n const interval = new TicksClass(this.context, this._getLoopDuration());\n event.id = this.context.transport.scheduleRepeat(this._tick.bind(this), interval, new TicksClass(this.context, startTick), duration);\n }\n else {\n event.id = this.context.transport.schedule(this._tick.bind(this), new TicksClass(this.context, startTick));\n }\n }\n });\n }\n /**\n * Returns the playback state of the note, either "started" or "stopped".\n */\n get state() {\n return this._state.getValueAtTime(this.context.transport.ticks);\n }\n /**\n * The start from the scheduled start time.\n */\n get startOffset() {\n return this._startOffset;\n }\n set startOffset(offset) {\n this._startOffset = offset;\n }\n /**\n * The probability of the notes being triggered.\n */\n get probability() {\n return this._probability;\n }\n set probability(prob) {\n this._probability = prob;\n }\n /**\n * If set to true, will apply small random variation\n * to the callback time. 
If the value is given as a time, it will randomize\n * by that amount.\n * @example\n * const event = new Tone.ToneEvent();\n * event.humanize = true;\n */\n get humanize() {\n return this._humanize;\n }\n set humanize(variation) {\n this._humanize = variation;\n }\n /**\n * Start the note at the given time.\n * @param time When the event should start.\n */\n start(time) {\n const ticks = this.toTicks(time);\n if (this._state.getValueAtTime(ticks) === "stopped") {\n this._state.add({\n id: -1,\n state: "started",\n time: ticks,\n });\n this._rescheduleEvents(ticks);\n }\n return this;\n }\n /**\n * Stop the Event at the given time.\n * @param time When the event should stop.\n */\n stop(time) {\n this.cancel(time);\n const ticks = this.toTicks(time);\n if (this._state.getValueAtTime(ticks) === "started") {\n this._state.setStateAtTime("stopped", ticks, { id: -1 });\n const previousEvent = this._state.getBefore(ticks);\n let reschedulTime = ticks;\n if (previousEvent !== null) {\n reschedulTime = previousEvent.time;\n }\n this._rescheduleEvents(reschedulTime);\n }\n return this;\n }\n /**\n * Cancel all scheduled events greater than or equal to the given time\n * @param time The time after which events will be cancel.\n */\n cancel(time) {\n time = defaultArg(time, -Infinity);\n const ticks = this.toTicks(time);\n this._state.forEachFrom(ticks, event => {\n this.context.transport.clear(event.id);\n });\n this._state.cancel(ticks);\n return this;\n }\n /**\n * The callback function invoker. 
Also\n * checks if the Event is done playing\n * @param time The time of the event in seconds\n */\n _tick(time) {\n const ticks = this.context.transport.getTicksAtTime(time);\n if (!this.mute && this._state.getValueAtTime(ticks) === "started") {\n if (this.probability < 1 && Math.random() > this.probability) {\n return;\n }\n if (this.humanize) {\n let variation = 0.02;\n if (!isBoolean(this.humanize)) {\n variation = this.toSeconds(this.humanize);\n }\n time += (Math.random() * 2 - 1) * variation;\n }\n this.callback(time, this.value);\n }\n }\n /**\n * Get the duration of the loop.\n */\n _getLoopDuration() {\n return Math.round((this._loopEnd - this._loopStart) / this._playbackRate);\n }\n /**\n * If the note should loop or not\n * between ToneEvent.loopStart and\n * ToneEvent.loopEnd. If set to true,\n * the event will loop indefinitely,\n * if set to a number greater than 1\n * it will play a specific number of\n * times, if set to false, 0 or 1, the\n * part will only play once.\n */\n get loop() {\n return this._loop;\n }\n set loop(loop) {\n this._loop = loop;\n this._rescheduleEvents();\n }\n /**\n * The playback rate of the note. 
Defaults to 1.\n * @example\n * const note = new Tone.ToneEvent();\n * note.loop = true;\n * // repeat the note twice as fast\n * note.playbackRate = 2;\n */\n get playbackRate() {\n return this._playbackRate;\n }\n set playbackRate(rate) {\n this._playbackRate = rate;\n this._rescheduleEvents();\n }\n /**\n * The loopEnd point is the time the event will loop\n * if ToneEvent.loop is true.\n */\n get loopEnd() {\n return new TicksClass(this.context, this._loopEnd).toSeconds();\n }\n set loopEnd(loopEnd) {\n this._loopEnd = this.toTicks(loopEnd);\n if (this._loop) {\n this._rescheduleEvents();\n }\n }\n /**\n * The time when the loop should start.\n */\n get loopStart() {\n return new TicksClass(this.context, this._loopStart).toSeconds();\n }\n set loopStart(loopStart) {\n this._loopStart = this.toTicks(loopStart);\n if (this._loop) {\n this._rescheduleEvents();\n }\n }\n /**\n * The current progress of the loop interval.\n * Returns 0 if the event is not started yet or\n * it is not set to loop.\n */\n get progress() {\n if (this._loop) {\n const ticks = this.context.transport.ticks;\n const lastEvent = this._state.get(ticks);\n if (lastEvent !== null && lastEvent.state === "started") {\n const loopDuration = this._getLoopDuration();\n const progress = (ticks - lastEvent.time) % loopDuration;\n return progress / loopDuration;\n }\n else {\n return 0;\n }\n }\n else {\n return 0;\n }\n }\n dispose() {\n super.dispose();\n this.cancel();\n this._state.dispose();\n return this;\n }\n}\n//# sourceMappingURL=ToneEvent.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/Loop.js\n\n\n\n\n/**\n * Loop creates a looped callback at the\n * specified interval. 
The callback can be\n * started, stopped and scheduled along\n * the Transport\'s timeline.\n * @example\n * const loop = new Tone.Loop((time) => {\n * \t// triggered every eighth note.\n * \tconsole.log(time);\n * }, "8n").start(0);\n * Tone.Transport.start();\n * @category Event\n */\nclass Loop_Loop extends (/* unused pure expression or super */ null && (ToneWithContext)) {\n constructor() {\n super(optionsFromArguments(Loop_Loop.getDefaults(), arguments, ["callback", "interval"]));\n this.name = "Loop";\n const options = optionsFromArguments(Loop_Loop.getDefaults(), arguments, ["callback", "interval"]);\n this._event = new ToneEvent({\n context: this.context,\n callback: this._tick.bind(this),\n loop: true,\n loopEnd: options.interval,\n playbackRate: options.playbackRate,\n probability: options.probability\n });\n this.callback = options.callback;\n // set the iterations\n this.iterations = options.iterations;\n }\n static getDefaults() {\n return Object.assign(ToneWithContext.getDefaults(), {\n interval: "4n",\n callback: noOp,\n playbackRate: 1,\n iterations: Infinity,\n probability: 1,\n mute: false,\n humanize: false\n });\n }\n /**\n * Start the loop at the specified time along the Transport\'s timeline.\n * @param time When to start the Loop.\n */\n start(time) {\n this._event.start(time);\n return this;\n }\n /**\n * Stop the loop at the given time.\n * @param time When to stop the Loop.\n */\n stop(time) {\n this._event.stop(time);\n return this;\n }\n /**\n * Cancel all scheduled events greater than or equal to the given time\n * @param time The time after which events will be cancel.\n */\n cancel(time) {\n this._event.cancel(time);\n return this;\n }\n /**\n * Internal function called when the notes should be called\n * @param time The time the event occurs\n */\n _tick(time) {\n this.callback(time);\n }\n /**\n * The state of the Loop, either started or stopped.\n */\n get state() {\n return this._event.state;\n }\n /**\n * The progress of the loop 
as a value between 0-1. 0, when the loop is stopped or done iterating.\n */\n get progress() {\n return this._event.progress;\n }\n /**\n * The time between successive callbacks.\n * @example\n * const loop = new Tone.Loop();\n * loop.interval = "8n"; // loop every 8n\n */\n get interval() {\n return this._event.loopEnd;\n }\n set interval(interval) {\n this._event.loopEnd = interval;\n }\n /**\n * The playback rate of the loop. The normal playback rate is 1 (no change).\n * A `playbackRate` of 2 would be twice as fast.\n */\n get playbackRate() {\n return this._event.playbackRate;\n }\n set playbackRate(rate) {\n this._event.playbackRate = rate;\n }\n /**\n * Random variation +/-0.01s to the scheduled time.\n * Or give it a time value which it will randomize by.\n */\n get humanize() {\n return this._event.humanize;\n }\n set humanize(variation) {\n this._event.humanize = variation;\n }\n /**\n * The probably of the callback being invoked.\n */\n get probability() {\n return this._event.probability;\n }\n set probability(prob) {\n this._event.probability = prob;\n }\n /**\n * Muting the Loop means that no callbacks are invoked.\n */\n get mute() {\n return this._event.mute;\n }\n set mute(mute) {\n this._event.mute = mute;\n }\n /**\n * The number of iterations of the loop. 
The default value is `Infinity` (loop forever).\n */\n get iterations() {\n if (this._event.loop === true) {\n return Infinity;\n }\n else {\n return this._event.loop;\n }\n }\n set iterations(iters) {\n if (iters === Infinity) {\n this._event.loop = true;\n }\n else {\n this._event.loop = iters;\n }\n }\n dispose() {\n super.dispose();\n this._event.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Loop.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/Part.js\n\n\n\n\n\n\n/**\n * Part is a collection ToneEvents which can be started/stopped and looped as a single unit.\n *\n * @example\n * const synth = new Tone.Synth().toDestination();\n * const part = new Tone.Part(((time, note) => {\n * \t// the notes given as the second element in the array\n * \t// will be passed in as the second argument\n * \tsynth.triggerAttackRelease(note, "8n", time);\n * }), [[0, "C2"], ["0:2", "C3"], ["0:3:2", "G2"]]);\n * Tone.Transport.start();\n * @example\n * const synth = new Tone.Synth().toDestination();\n * // use an array of objects as long as the object has a "time" attribute\n * const part = new Tone.Part(((time, value) => {\n * \t// the value is an object which contains both the note and the velocity\n * \tsynth.triggerAttackRelease(value.note, "8n", time, value.velocity);\n * }), [{ time: 0, note: "C3", velocity: 0.9 },\n * \t{ time: "0:2", note: "C4", velocity: 0.5 }\n * ]).start(0);\n * Tone.Transport.start();\n * @category Event\n */\nclass Part_Part extends (/* unused pure expression or super */ null && (ToneEvent)) {\n constructor() {\n super(optionsFromArguments(Part_Part.getDefaults(), arguments, ["callback", "events"]));\n this.name = "Part";\n /**\n * Tracks the scheduled events\n */\n this._state = new StateTimeline("stopped");\n /**\n * The events that belong to this part\n */\n this._events = new Set();\n const options = optionsFromArguments(Part_Part.getDefaults(), arguments, ["callback", "events"]);\n // make sure things are assigned in 
the right order\n this._state.increasing = true;\n // add the events\n options.events.forEach(event => {\n if (isArray(event)) {\n this.add(event[0], event[1]);\n }\n else {\n this.add(event);\n }\n });\n }\n static getDefaults() {\n return Object.assign(ToneEvent.getDefaults(), {\n events: [],\n });\n }\n /**\n * Start the part at the given time.\n * @param time When to start the part.\n * @param offset The offset from the start of the part to begin playing at.\n */\n start(time, offset) {\n const ticks = this.toTicks(time);\n if (this._state.getValueAtTime(ticks) !== "started") {\n offset = defaultArg(offset, this._loop ? this._loopStart : 0);\n if (this._loop) {\n offset = defaultArg(offset, this._loopStart);\n }\n else {\n offset = defaultArg(offset, 0);\n }\n const computedOffset = this.toTicks(offset);\n this._state.add({\n id: -1,\n offset: computedOffset,\n state: "started",\n time: ticks,\n });\n this._forEach(event => {\n this._startNote(event, ticks, computedOffset);\n });\n }\n return this;\n }\n /**\n * Start the event in the given event at the correct time given\n * the ticks and offset and looping.\n * @param event\n * @param ticks\n * @param offset\n */\n _startNote(event, ticks, offset) {\n ticks -= offset;\n if (this._loop) {\n if (event.startOffset >= this._loopStart && event.startOffset < this._loopEnd) {\n if (event.startOffset < offset) {\n // start it on the next loop\n ticks += this._getLoopDuration();\n }\n event.start(new TicksClass(this.context, ticks));\n }\n else if (event.startOffset < this._loopStart && event.startOffset >= offset) {\n event.loop = false;\n event.start(new TicksClass(this.context, ticks));\n }\n }\n else if (event.startOffset >= offset) {\n event.start(new TicksClass(this.context, ticks));\n }\n }\n get startOffset() {\n return this._startOffset;\n }\n set startOffset(offset) {\n this._startOffset = offset;\n this._forEach(event => {\n event.startOffset += this._startOffset;\n });\n }\n /**\n * Stop the part at the 
given time.\n * @param time When to stop the part.\n */\n stop(time) {\n const ticks = this.toTicks(time);\n this._state.cancel(ticks);\n this._state.setStateAtTime("stopped", ticks);\n this._forEach(event => {\n event.stop(time);\n });\n return this;\n }\n /**\n * Get/Set an Event\'s value at the given time.\n * If a value is passed in and no event exists at\n * the given time, one will be created with that value.\n * If two events are at the same time, the first one will\n * be returned.\n * @example\n * const part = new Tone.Part();\n * part.at("1m"); // returns the part at the first measure\n * part.at("2m", "C2"); // set the value at "2m" to C2.\n * // if an event didn\'t exist at that time, it will be created.\n * @param time The time of the event to get or set.\n * @param value If a value is passed in, the value of the event at the given time will be set to it.\n */\n at(time, value) {\n const timeInTicks = new TransportTimeClass(this.context, time).toTicks();\n const tickTime = new TicksClass(this.context, 1).toSeconds();\n const iterator = this._events.values();\n let result = iterator.next();\n while (!result.done) {\n const event = result.value;\n if (Math.abs(timeInTicks - event.startOffset) < tickTime) {\n if (isDefined(value)) {\n event.value = value;\n }\n return event;\n }\n result = iterator.next();\n }\n // if there was no event at that time, create one\n if (isDefined(value)) {\n this.add(time, value);\n // return the new event\n return this.at(time);\n }\n else {\n return null;\n }\n }\n add(time, value) {\n // extract the parameters\n if (time instanceof Object && Reflect.has(time, "time")) {\n value = time;\n time = value.time;\n }\n const ticks = this.toTicks(time);\n let event;\n if (value instanceof ToneEvent) {\n event = value;\n event.callback = this._tick.bind(this);\n }\n else {\n event = new ToneEvent({\n callback: this._tick.bind(this),\n context: this.context,\n value,\n });\n }\n // the start offset\n event.startOffset = ticks;\n // 
initialize the values\n event.set({\n humanize: this.humanize,\n loop: this.loop,\n loopEnd: this.loopEnd,\n loopStart: this.loopStart,\n playbackRate: this.playbackRate,\n probability: this.probability,\n });\n this._events.add(event);\n // start the note if it should be played right now\n this._restartEvent(event);\n return this;\n }\n /**\n * Restart the given event\n */\n _restartEvent(event) {\n this._state.forEach((stateEvent) => {\n if (stateEvent.state === "started") {\n this._startNote(event, stateEvent.time, stateEvent.offset);\n }\n else {\n // stop the note\n event.stop(new TicksClass(this.context, stateEvent.time));\n }\n });\n }\n remove(time, value) {\n // extract the parameters\n if (isObject(time) && time.hasOwnProperty("time")) {\n value = time;\n time = value.time;\n }\n time = this.toTicks(time);\n this._events.forEach(event => {\n if (event.startOffset === time) {\n if (isUndef(value) || (isDefined(value) && event.value === value)) {\n this._events.delete(event);\n event.dispose();\n }\n }\n });\n return this;\n }\n /**\n * Remove all of the notes from the group.\n */\n clear() {\n this._forEach(event => event.dispose());\n this._events.clear();\n return this;\n }\n /**\n * Cancel scheduled state change events: i.e. 
"start" and "stop".\n * @param after The time after which to cancel the scheduled events.\n */\n cancel(after) {\n this._forEach(event => event.cancel(after));\n this._state.cancel(this.toTicks(after));\n return this;\n }\n /**\n * Iterate over all of the events\n */\n _forEach(callback) {\n if (this._events) {\n this._events.forEach(event => {\n if (event instanceof Part_Part) {\n event._forEach(callback);\n }\n else {\n callback(event);\n }\n });\n }\n return this;\n }\n /**\n * Set the attribute of all of the events\n * @param attr the attribute to set\n * @param value The value to set it to\n */\n _setAll(attr, value) {\n this._forEach(event => {\n event[attr] = value;\n });\n }\n /**\n * Internal tick method\n * @param time The time of the event in seconds\n */\n _tick(time, value) {\n if (!this.mute) {\n this.callback(time, value);\n }\n }\n /**\n * Determine if the event should be currently looping\n * given the loop boundries of this Part.\n * @param event The event to test\n */\n _testLoopBoundries(event) {\n if (this._loop && (event.startOffset < this._loopStart || event.startOffset >= this._loopEnd)) {\n event.cancel(0);\n }\n else if (event.state === "stopped") {\n // reschedule it if it\'s stopped\n this._restartEvent(event);\n }\n }\n get probability() {\n return this._probability;\n }\n set probability(prob) {\n this._probability = prob;\n this._setAll("probability", prob);\n }\n get humanize() {\n return this._humanize;\n }\n set humanize(variation) {\n this._humanize = variation;\n this._setAll("humanize", variation);\n }\n /**\n * If the part should loop or not\n * between Part.loopStart and\n * Part.loopEnd. 
If set to true,\n * the part will loop indefinitely,\n * if set to a number greater than 1\n * it will play a specific number of\n * times, if set to false, 0 or 1, the\n * part will only play once.\n * @example\n * const part = new Tone.Part();\n * // loop the part 8 times\n * part.loop = 8;\n */\n get loop() {\n return this._loop;\n }\n set loop(loop) {\n this._loop = loop;\n this._forEach(event => {\n event.loopStart = this.loopStart;\n event.loopEnd = this.loopEnd;\n event.loop = loop;\n this._testLoopBoundries(event);\n });\n }\n /**\n * The loopEnd point determines when it will\n * loop if Part.loop is true.\n */\n get loopEnd() {\n return new TicksClass(this.context, this._loopEnd).toSeconds();\n }\n set loopEnd(loopEnd) {\n this._loopEnd = this.toTicks(loopEnd);\n if (this._loop) {\n this._forEach(event => {\n event.loopEnd = loopEnd;\n this._testLoopBoundries(event);\n });\n }\n }\n /**\n * The loopStart point determines when it will\n * loop if Part.loop is true.\n */\n get loopStart() {\n return new TicksClass(this.context, this._loopStart).toSeconds();\n }\n set loopStart(loopStart) {\n this._loopStart = this.toTicks(loopStart);\n if (this._loop) {\n this._forEach(event => {\n event.loopStart = this.loopStart;\n this._testLoopBoundries(event);\n });\n }\n }\n /**\n * The playback rate of the part\n */\n get playbackRate() {\n return this._playbackRate;\n }\n set playbackRate(rate) {\n this._playbackRate = rate;\n this._setAll("playbackRate", rate);\n }\n /**\n * The number of scheduled notes in the part.\n */\n get length() {\n return this._events.size;\n }\n dispose() {\n super.dispose();\n this.clear();\n return this;\n }\n}\n//# sourceMappingURL=Part.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/PatternGenerator.js\n\n\n/**\n * Start at the first value and go up to the last\n */\nfunction* upPatternGen(values) {\n let index = 0;\n while (index < values.length) {\n index = clampToArraySize(index, values);\n yield 
values[index];\n index++;\n }\n}\n/**\n * Start at the last value and go down to 0\n */\nfunction* downPatternGen(values) {\n let index = values.length - 1;\n while (index >= 0) {\n index = clampToArraySize(index, values);\n yield values[index];\n index--;\n }\n}\n/**\n * Infinitely yield the generator\n */\nfunction* infiniteGen(values, gen) {\n while (true) {\n yield* gen(values);\n }\n}\n/**\n * Make sure that the index is in the given range\n */\nfunction clampToArraySize(index, values) {\n return clamp(index, 0, values.length - 1);\n}\n/**\n * Alternate between two generators\n */\nfunction* alternatingGenerator(values, directionUp) {\n let index = directionUp ? 0 : values.length - 1;\n while (true) {\n index = clampToArraySize(index, values);\n yield values[index];\n if (directionUp) {\n index++;\n if (index >= values.length - 1) {\n directionUp = false;\n }\n }\n else {\n index--;\n if (index <= 0) {\n directionUp = true;\n }\n }\n }\n}\n/**\n * Starting from the bottom move up 2, down 1\n */\nfunction* jumpUp(values) {\n let index = 0;\n let stepIndex = 0;\n while (index < values.length) {\n index = clampToArraySize(index, values);\n yield values[index];\n stepIndex++;\n index += (stepIndex % 2 ? 2 : -1);\n }\n}\n/**\n * Starting from the top move down 2, up 1\n */\nfunction* jumpDown(values) {\n let index = values.length - 1;\n let stepIndex = 0;\n while (index >= 0) {\n index = clampToArraySize(index, values);\n yield values[index];\n stepIndex++;\n index += (stepIndex % 2 ? 
-2 : 1);\n }\n}\n/**\n * Choose a random index each time\n */\nfunction* randomGen(values) {\n while (true) {\n const randomIndex = Math.floor(Math.random() * values.length);\n yield values[randomIndex];\n }\n}\n/**\n * Randomly go through all of the values once before choosing a new random order\n */\nfunction* randomOnce(values) {\n // create an array of indices\n const copy = [];\n for (let i = 0; i < values.length; i++) {\n copy.push(i);\n }\n while (copy.length > 0) {\n // random choose an index, and then remove it so it\'s not chosen again\n const randVal = copy.splice(Math.floor(copy.length * Math.random()), 1);\n const index = clampToArraySize(randVal[0], values);\n yield values[index];\n }\n}\n/**\n * Randomly choose to walk up or down 1 index in the values array\n */\nfunction* randomWalk(values) {\n // randomly choose a starting index in the values array\n let index = Math.floor(Math.random() * values.length);\n while (true) {\n if (index === 0) {\n index++; // at bottom of array, so force upward step\n }\n else if (index === values.length - 1) {\n index--; // at top of array, so force downward step\n }\n else if (Math.random() < 0.5) { // else choose random downward or upward step\n index--;\n }\n else {\n index++;\n }\n yield values[index];\n }\n}\n/**\n * PatternGenerator returns a generator which will iterate over the given array\n * of values and yield the items according to the passed in pattern\n * @param values An array of values to iterate over\n * @param pattern The name of the pattern use when iterating over\n * @param index Where to start in the offset of the values array\n */\nfunction* PatternGenerator_PatternGenerator(values, pattern = "up", index = 0) {\n // safeguards\n assert(values.length > 0, "The array must have more than one value in it");\n switch (pattern) {\n case "up":\n yield* infiniteGen(values, upPatternGen);\n case "down":\n yield* infiniteGen(values, downPatternGen);\n case "upDown":\n yield* alternatingGenerator(values, 
true);\n case "downUp":\n yield* alternatingGenerator(values, false);\n case "alternateUp":\n yield* infiniteGen(values, jumpUp);\n case "alternateDown":\n yield* infiniteGen(values, jumpDown);\n case "random":\n yield* randomGen(values);\n case "randomOnce":\n yield* infiniteGen(values, randomOnce);\n case "randomWalk":\n yield* randomWalk(values);\n }\n}\n//# sourceMappingURL=PatternGenerator.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/Pattern.js\n\n\n\n\n/**\n * Pattern arpeggiates between the given notes\n * in a number of patterns.\n * @example\n * const pattern = new Tone.Pattern((time, note) => {\n * \t// the order of the notes passed in depends on the pattern\n * }, ["C2", "D4", "E5", "A6"], "upDown");\n * @category Event\n */\nclass Pattern extends (/* unused pure expression or super */ null && (Loop)) {\n constructor() {\n super(optionsFromArguments(Pattern.getDefaults(), arguments, ["callback", "values", "pattern"]));\n this.name = "Pattern";\n const options = optionsFromArguments(Pattern.getDefaults(), arguments, ["callback", "values", "pattern"]);\n this.callback = options.callback;\n this._values = options.values;\n this._pattern = PatternGenerator(options.values, options.pattern);\n this._type = options.pattern;\n }\n static getDefaults() {\n return Object.assign(Loop.getDefaults(), {\n pattern: "up",\n values: [],\n callback: noOp,\n });\n }\n /**\n * Internal function called when the notes should be called\n */\n _tick(time) {\n const value = this._pattern.next();\n this._value = value.value;\n this.callback(time, this._value);\n }\n /**\n * The array of events.\n */\n get values() {\n return this._values;\n }\n set values(val) {\n this._values = val;\n // reset the pattern\n this.pattern = this._type;\n }\n /**\n * The current value of the pattern.\n */\n get value() {\n return this._value;\n }\n /**\n * The pattern type. 
See Tone.CtrlPattern for the full list of patterns.\n */\n get pattern() {\n return this._type;\n }\n set pattern(pattern) {\n this._type = pattern;\n this._pattern = PatternGenerator(this._values, this._type);\n }\n}\n//# sourceMappingURL=Pattern.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/Sequence.js\n\n\n\n\n\n/**\n * A sequence is an alternate notation of a part. Instead\n * of passing in an array of [time, event] pairs, pass\n * in an array of events which will be spaced at the\n * given subdivision. Sub-arrays will subdivide that beat\n * by the number of items are in the array.\n * Sequence notation inspiration from [Tidal](http://yaxu.org/tidal/)\n * @example\n * const synth = new Tone.Synth().toDestination();\n * const seq = new Tone.Sequence((time, note) => {\n * \tsynth.triggerAttackRelease(note, 0.1, time);\n * \t// subdivisions are given as subarrays\n * }, ["C4", ["E4", "D4", "E4"], "G4", ["A4", "G4"]]).start(0);\n * Tone.Transport.start();\n * @category Event\n */\nclass Sequence extends (/* unused pure expression or super */ null && (ToneEvent)) {\n constructor() {\n super(optionsFromArguments(Sequence.getDefaults(), arguments, ["callback", "events", "subdivision"]));\n this.name = "Sequence";\n /**\n * The object responsible for scheduling all of the events\n */\n this._part = new Part({\n callback: this._seqCallback.bind(this),\n context: this.context,\n });\n /**\n * private reference to all of the sequence proxies\n */\n this._events = [];\n /**\n * The proxied array\n */\n this._eventsArray = [];\n const options = optionsFromArguments(Sequence.getDefaults(), arguments, ["callback", "events", "subdivision"]);\n this._subdivision = this.toTicks(options.subdivision);\n this.events = options.events;\n // set all of the values\n this.loop = options.loop;\n this.loopStart = options.loopStart;\n this.loopEnd = options.loopEnd;\n this.playbackRate = options.playbackRate;\n this.probability = options.probability;\n this.humanize 
= options.humanize;\n this.mute = options.mute;\n this.playbackRate = options.playbackRate;\n }\n static getDefaults() {\n return Object.assign(omitFromObject(ToneEvent.getDefaults(), ["value"]), {\n events: [],\n loop: true,\n loopEnd: 0,\n loopStart: 0,\n subdivision: "8n",\n });\n }\n /**\n * The internal callback for when an event is invoked\n */\n _seqCallback(time, value) {\n if (value !== null) {\n this.callback(time, value);\n }\n }\n /**\n * The sequence\n */\n get events() {\n return this._events;\n }\n set events(s) {\n this.clear();\n this._eventsArray = s;\n this._events = this._createSequence(this._eventsArray);\n this._eventsUpdated();\n }\n /**\n * Start the part at the given time.\n * @param time When to start the part.\n * @param offset The offset index to start at\n */\n start(time, offset) {\n this._part.start(time, offset ? this._indexTime(offset) : offset);\n return this;\n }\n /**\n * Stop the part at the given time.\n * @param time When to stop the part.\n */\n stop(time) {\n this._part.stop(time);\n return this;\n }\n /**\n * The subdivision of the sequence. This can only be\n * set in the constructor. 
The subdivision is the\n * interval between successive steps.\n */\n get subdivision() {\n return new TicksClass(this.context, this._subdivision).toSeconds();\n }\n /**\n * Create a sequence proxy which can be monitored to create subsequences\n */\n _createSequence(array) {\n return new Proxy(array, {\n get: (target, property) => {\n // property is index in this case\n return target[property];\n },\n set: (target, property, value) => {\n if (isString(property) && isFinite(parseInt(property, 10))) {\n if (isArray(value)) {\n target[property] = this._createSequence(value);\n }\n else {\n target[property] = value;\n }\n }\n else {\n target[property] = value;\n }\n this._eventsUpdated();\n // return true to accept the changes\n return true;\n },\n });\n }\n /**\n * When the sequence has changed, all of the events need to be recreated\n */\n _eventsUpdated() {\n this._part.clear();\n this._rescheduleSequence(this._eventsArray, this._subdivision, this.startOffset);\n // update the loopEnd\n this.loopEnd = this.loopEnd;\n }\n /**\n * reschedule all of the events that need to be rescheduled\n */\n _rescheduleSequence(sequence, subdivision, startOffset) {\n sequence.forEach((value, index) => {\n const eventOffset = index * (subdivision) + startOffset;\n if (isArray(value)) {\n this._rescheduleSequence(value, subdivision / value.length, eventOffset);\n }\n else {\n const startTime = new TicksClass(this.context, eventOffset, "i").toSeconds();\n this._part.add(startTime, value);\n }\n });\n }\n /**\n * Get the time of the index given the Sequence\'s subdivision\n * @param index\n * @return The time of that index\n */\n _indexTime(index) {\n return new TicksClass(this.context, index * (this._subdivision) + this.startOffset).toSeconds();\n }\n /**\n * Clear all of the events\n */\n clear() {\n this._part.clear();\n return this;\n }\n dispose() {\n super.dispose();\n this._part.dispose();\n return this;\n }\n //-------------------------------------\n // PROXY CALLS\n 
//-------------------------------------\n get loop() {\n return this._part.loop;\n }\n set loop(l) {\n this._part.loop = l;\n }\n /**\n * The index at which the sequence should start looping\n */\n get loopStart() {\n return this._loopStart;\n }\n set loopStart(index) {\n this._loopStart = index;\n this._part.loopStart = this._indexTime(index);\n }\n /**\n * The index at which the sequence should end looping\n */\n get loopEnd() {\n return this._loopEnd;\n }\n set loopEnd(index) {\n this._loopEnd = index;\n if (index === 0) {\n this._part.loopEnd = this._indexTime(this._eventsArray.length);\n }\n else {\n this._part.loopEnd = this._indexTime(index);\n }\n }\n get startOffset() {\n return this._part.startOffset;\n }\n set startOffset(start) {\n this._part.startOffset = start;\n }\n get playbackRate() {\n return this._part.playbackRate;\n }\n set playbackRate(rate) {\n this._part.playbackRate = rate;\n }\n get probability() {\n return this._part.probability;\n }\n set probability(prob) {\n this._part.probability = prob;\n }\n get progress() {\n return this._part.progress;\n }\n get humanize() {\n return this._part.humanize;\n }\n set humanize(variation) {\n this._part.humanize = variation;\n }\n /**\n * The number of scheduled events\n */\n get length() {\n return this._part.length;\n }\n}\n//# sourceMappingURL=Sequence.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/event/index.js\n\n\n\n\n\n//# sourceMappingURL=index.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/channel/CrossFade.js\n\n\n\n\n\n\n/**\n * Tone.Crossfade provides equal power fading between two inputs.\n * More on crossfading technique [here](https://en.wikipedia.org/wiki/Fade_(audio_engineering)#Crossfading).\n * ```\n * +---------+\n * +> input a +>--+\n * +-----------+ +---------------------+ | | |\n * | 1s signal +>--\x3e stereoPannerNode L +>----\x3e gain | |\n * +-----------+ | | +---------+ |\n * +-> pan R +>-+ | +--------+\n * | +---------------------+ 
| +---\x3e output +>\n * +------+ | | +---------+ | +--------+\n * | fade +>----+ | +> input b +>--+\n * +------+ | | |\n * +--\x3e gain |\n * +---------+\n * ```\n * @example\n * const crossFade = new Tone.CrossFade().toDestination();\n * // connect two inputs Tone.to a/b\n * const inputA = new Tone.Oscillator(440, "square").connect(crossFade.a).start();\n * const inputB = new Tone.Oscillator(440, "sine").connect(crossFade.b).start();\n * // use the fade to control the mix between the two\n * crossFade.fade.value = 0.5;\n * @category Component\n */\nclass CrossFade_CrossFade extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor() {\n super(Object.assign(optionsFromArguments(CrossFade_CrossFade.getDefaults(), arguments, ["fade"])));\n this.name = "CrossFade";\n /**\n * The crossfading is done by a StereoPannerNode\n */\n this._panner = this.context.createStereoPanner();\n /**\n * Split the output of the panner node into two values used to control the gains.\n */\n this._split = this.context.createChannelSplitter(2);\n /**\n * Convert the fade value into an audio range value so it can be connected\n * to the panner.pan AudioParam\n */\n this._g2a = new GainToAudio({ context: this.context });\n /**\n * The input which is at full level when fade = 0\n */\n this.a = new Gain({\n context: this.context,\n gain: 0,\n });\n /**\n * The input which is at full level when fade = 1\n */\n this.b = new Gain({\n context: this.context,\n gain: 0,\n });\n /**\n * The output is a mix between `a` and `b` at the ratio of `fade`\n */\n this.output = new Gain({ context: this.context });\n this._internalChannels = [this.a, this.b];\n const options = optionsFromArguments(CrossFade_CrossFade.getDefaults(), arguments, ["fade"]);\n this.fade = new Signal({\n context: this.context,\n units: "normalRange",\n value: options.fade,\n });\n readOnly(this, "fade");\n this.context.getConstant(1).connect(this._panner);\n this._panner.connect(this._split);\n // this 
is necessary for standardized-audio-context\n // doesn\'t make any difference for the native AudioContext\n // https://github.com/chrisguttandin/standardized-audio-context/issues/647\n this._panner.channelCount = 1;\n this._panner.channelCountMode = "explicit";\n connect(this._split, this.a.gain, 0);\n connect(this._split, this.b.gain, 1);\n this.fade.chain(this._g2a, this._panner.pan);\n this.a.connect(this.output);\n this.b.connect(this.output);\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n fade: 0.5,\n });\n }\n dispose() {\n super.dispose();\n this.a.dispose();\n this.b.dispose();\n this.output.dispose();\n this.fade.dispose();\n this._g2a.dispose();\n this._panner.disconnect();\n this._split.disconnect();\n return this;\n }\n}\n//# sourceMappingURL=CrossFade.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Effect.js\n\n\n\n\n/**\n * Effect is the base class for effects. Connect the effect between\n * the effectSend and effectReturn GainNodes, then control the amount of\n * effect which goes to the output using the wet control.\n */\nclass Effect_Effect extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor(options) {\n super(options);\n this.name = "Effect";\n /**\n * the drywet knob to control the amount of effect\n */\n this._dryWet = new CrossFade({ context: this.context });\n /**\n * The wet control is how much of the effected\n * will pass through to the output. 
1 = 100% effected\n * signal, 0 = 100% dry signal.\n */\n this.wet = this._dryWet.fade;\n /**\n * connect the effectSend to the input of hte effect\n */\n this.effectSend = new Gain({ context: this.context });\n /**\n * connect the output of the effect to the effectReturn\n */\n this.effectReturn = new Gain({ context: this.context });\n /**\n * The effect input node\n */\n this.input = new Gain({ context: this.context });\n /**\n * The effect output\n */\n this.output = this._dryWet;\n // connections\n this.input.fan(this._dryWet.a, this.effectSend);\n this.effectReturn.connect(this._dryWet.b);\n this.wet.setValueAtTime(options.wet, 0);\n this._internalChannels = [this.effectReturn, this.effectSend];\n readOnly(this, "wet");\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n wet: 1,\n });\n }\n /**\n * chains the effect in between the effectSend and effectReturn\n */\n connectEffect(effect) {\n // add it to the internal channels\n this._internalChannels.push(effect);\n this.effectSend.chain(effect, this.effectReturn);\n return this;\n }\n dispose() {\n super.dispose();\n this._dryWet.dispose();\n this.effectSend.dispose();\n this.effectReturn.dispose();\n this.wet.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Effect.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/LFOEffect.js\n\n\n\n/**\n * Base class for LFO-based effects.\n */\nclass LFOEffect_LFOEffect extends (/* unused pure expression or super */ null && (Effect)) {\n constructor(options) {\n super(options);\n this.name = "LFOEffect";\n this._lfo = new LFO({\n context: this.context,\n frequency: options.frequency,\n amplitude: options.depth,\n });\n this.depth = this._lfo.amplitude;\n this.frequency = this._lfo.frequency;\n this.type = options.type;\n readOnly(this, ["frequency", "depth"]);\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n frequency: 1,\n type: "sine",\n depth: 1,\n });\n }\n /**\n * Start the 
effect.\n */\n start(time) {\n this._lfo.start(time);\n return this;\n }\n /**\n * Stop the lfo\n */\n stop(time) {\n this._lfo.stop(time);\n return this;\n }\n /**\n * Sync the filter to the transport. See [[LFO.sync]]\n */\n sync() {\n this._lfo.sync();\n return this;\n }\n /**\n * Unsync the filter from the transport.\n */\n unsync() {\n this._lfo.unsync();\n return this;\n }\n /**\n * The type of the LFO\'s oscillator: See [[Oscillator.type]]\n * @example\n * const autoFilter = new Tone.AutoFilter().start().toDestination();\n * const noise = new Tone.Noise().start().connect(autoFilter);\n * autoFilter.type = "square";\n */\n get type() {\n return this._lfo.type;\n }\n set type(type) {\n this._lfo.type = type;\n }\n dispose() {\n super.dispose();\n this._lfo.dispose();\n this.frequency.dispose();\n this.depth.dispose();\n return this;\n }\n}\n//# sourceMappingURL=LFOEffect.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/AutoFilter.js\n\n\n\n/**\n * AutoFilter is a Tone.Filter with a Tone.LFO connected to the filter cutoff frequency.\n * Setting the LFO rate and depth allows for control over the filter modulation rate\n * and depth.\n *\n * @example\n * // create an autofilter and start it\'s LFO\n * const autoFilter = new Tone.AutoFilter("4n").toDestination().start();\n * // route an oscillator through the filter and start it\n * const oscillator = new Tone.Oscillator().connect(autoFilter).start();\n * @category Effect\n */\nclass AutoFilter extends (/* unused pure expression or super */ null && (LFOEffect)) {\n constructor() {\n super(optionsFromArguments(AutoFilter.getDefaults(), arguments, ["frequency", "baseFrequency", "octaves"]));\n this.name = "AutoFilter";\n const options = optionsFromArguments(AutoFilter.getDefaults(), arguments, ["frequency", "baseFrequency", "octaves"]);\n this.filter = new Filter(Object.assign(options.filter, {\n context: this.context,\n }));\n // connections\n this.connectEffect(this.filter);\n 
this._lfo.connect(this.filter.frequency);\n this.octaves = options.octaves;\n this.baseFrequency = options.baseFrequency;\n }\n static getDefaults() {\n return Object.assign(LFOEffect.getDefaults(), {\n baseFrequency: 200,\n octaves: 2.6,\n filter: {\n type: "lowpass",\n rolloff: -12,\n Q: 1,\n }\n });\n }\n /**\n * The minimum value of the filter\'s cutoff frequency.\n */\n get baseFrequency() {\n return this._lfo.min;\n }\n set baseFrequency(freq) {\n this._lfo.min = this.toFrequency(freq);\n // and set the max\n this.octaves = this._octaves;\n }\n /**\n * The maximum value of the filter\'s cutoff frequency.\n */\n get octaves() {\n return this._octaves;\n }\n set octaves(oct) {\n this._octaves = oct;\n this._lfo.max = this._lfo.min * Math.pow(2, oct);\n }\n dispose() {\n super.dispose();\n this.filter.dispose();\n return this;\n }\n}\n//# sourceMappingURL=AutoFilter.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/channel/Panner.js\n\n\n\n\n/**\n * Panner is an equal power Left/Right Panner. 
It is a wrapper around the StereoPannerNode.\n * @example\n * return Tone.Offline(() => {\n * // move the input signal from right to left\n * \tconst panner = new Tone.Panner(1).toDestination();\n * \tpanner.pan.rampTo(-1, 0.5);\n * \tconst osc = new Tone.Oscillator(100).connect(panner).start();\n * }, 0.5, 2);\n * @category Component\n */\nclass Panner_Panner extends ToneAudioNode_ToneAudioNode {\n constructor() {\n super(Object.assign(Defaults_optionsFromArguments(Panner_Panner.getDefaults(), arguments, ["pan"])));\n this.name = "Panner";\n /**\n * the panner node\n */\n this._panner = this.context.createStereoPanner();\n this.input = this._panner;\n this.output = this._panner;\n const options = Defaults_optionsFromArguments(Panner_Panner.getDefaults(), arguments, ["pan"]);\n this.pan = new Param_Param({\n context: this.context,\n param: this._panner.pan,\n value: options.pan,\n minValue: -1,\n maxValue: 1,\n });\n // this is necessary for standardized-audio-context\n // doesn\'t make any difference for the native AudioContext\n // https://github.com/chrisguttandin/standardized-audio-context/issues/647\n this._panner.channelCount = options.channelCount;\n this._panner.channelCountMode = "explicit";\n // initial value\n Interface_readOnly(this, "pan");\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode_ToneAudioNode.getDefaults(), {\n pan: 0,\n channelCount: 1,\n });\n }\n dispose() {\n super.dispose();\n this._panner.disconnect();\n this.pan.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Panner.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/AutoPanner.js\n\n\n\n/**\n * AutoPanner is a [[Panner]] with an [[LFO]] connected to the pan amount.\n * [Related Reading](https://www.ableton.com/en/blog/autopan-chopper-effect-and-more-liveschool/).\n *\n * @example\n * // create an autopanner and start it\n * const autoPanner = new Tone.AutoPanner("4n").toDestination().start();\n * // route an oscillator through the panner and 
start it\n * const oscillator = new Tone.Oscillator().connect(autoPanner).start();\n * @category Effect\n */\nclass AutoPanner extends (/* unused pure expression or super */ null && (LFOEffect)) {\n constructor() {\n super(optionsFromArguments(AutoPanner.getDefaults(), arguments, ["frequency"]));\n this.name = "AutoPanner";\n const options = optionsFromArguments(AutoPanner.getDefaults(), arguments, ["frequency"]);\n this._panner = new Panner({\n context: this.context,\n channelCount: options.channelCount\n });\n // connections\n this.connectEffect(this._panner);\n this._lfo.connect(this._panner.pan);\n this._lfo.min = -1;\n this._lfo.max = 1;\n }\n static getDefaults() {\n return Object.assign(LFOEffect.getDefaults(), {\n channelCount: 1\n });\n }\n dispose() {\n super.dispose();\n this._panner.dispose();\n return this;\n }\n}\n//# sourceMappingURL=AutoPanner.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/analysis/Follower.js\n\n\n\n\n/**\n * Follower is a simple envelope follower.\n * It\'s implemented by applying a lowpass filter to the absolute value of the incoming signal.\n * ```\n * +-----+ +---------------+\n * Input +--\x3e Abs +----\x3e OnePoleFilter +--\x3e Output\n * +-----+ +---------------+\n * ```\n * @category Component\n */\nclass Follower_Follower extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor() {\n super(optionsFromArguments(Follower_Follower.getDefaults(), arguments, ["smoothing"]));\n this.name = "Follower";\n const options = optionsFromArguments(Follower_Follower.getDefaults(), arguments, ["smoothing"]);\n this._abs = this.input = new Abs({ context: this.context });\n this._lowpass = this.output = new OnePoleFilter({\n context: this.context,\n frequency: 1 / this.toSeconds(options.smoothing),\n type: "lowpass"\n });\n this._abs.connect(this._lowpass);\n this._smoothing = options.smoothing;\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n 
smoothing: 0.05\n });\n }\n /**\n * The amount of time it takes a value change to arrive at the updated value.\n */\n get smoothing() {\n return this._smoothing;\n }\n set smoothing(smoothing) {\n this._smoothing = smoothing;\n this._lowpass.frequency = 1 / this.toSeconds(this.smoothing);\n }\n dispose() {\n super.dispose();\n this._abs.dispose();\n this._lowpass.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Follower.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/AutoWah.js\n\n\n\n\n\n\n\n\n/**\n * AutoWah connects a [[Follower]] to a [[Filter]].\n * The frequency of the filter, follows the input amplitude curve.\n * Inspiration from [Tuna.js](https://github.com/Dinahmoe/tuna).\n *\n * @example\n * const autoWah = new Tone.AutoWah(50, 6, -30).toDestination();\n * // initialize the synth and connect to autowah\n * const synth = new Tone.Synth().connect(autoWah);\n * // Q value influences the effect of the wah - default is 2\n * autoWah.Q.value = 6;\n * // more audible on higher notes\n * synth.triggerAttackRelease("C4", "8n");\n * @category Effect\n */\nclass AutoWah extends (/* unused pure expression or super */ null && (Effect)) {\n constructor() {\n super(optionsFromArguments(AutoWah.getDefaults(), arguments, ["baseFrequency", "octaves", "sensitivity"]));\n this.name = "AutoWah";\n const options = optionsFromArguments(AutoWah.getDefaults(), arguments, ["baseFrequency", "octaves", "sensitivity"]);\n this._follower = new Follower({\n context: this.context,\n smoothing: options.follower,\n });\n this._sweepRange = new ScaleExp({\n context: this.context,\n min: 0,\n max: 1,\n exponent: 0.5,\n });\n this._baseFrequency = this.toFrequency(options.baseFrequency);\n this._octaves = options.octaves;\n this._inputBoost = new Gain({ context: this.context });\n this._bandpass = new Filter({\n context: this.context,\n rolloff: -48,\n frequency: 0,\n Q: options.Q,\n });\n this._peaking = new Filter({\n context: this.context,\n type: "peaking"\n 
});\n this._peaking.gain.value = options.gain;\n this.gain = this._peaking.gain;\n this.Q = this._bandpass.Q;\n // the control signal path\n this.effectSend.chain(this._inputBoost, this._follower, this._sweepRange);\n this._sweepRange.connect(this._bandpass.frequency);\n this._sweepRange.connect(this._peaking.frequency);\n // the filtered path\n this.effectSend.chain(this._bandpass, this._peaking, this.effectReturn);\n // set the initial value\n this._setSweepRange();\n this.sensitivity = options.sensitivity;\n readOnly(this, ["gain", "Q"]);\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n baseFrequency: 100,\n octaves: 6,\n sensitivity: 0,\n Q: 2,\n gain: 2,\n follower: 0.2,\n });\n }\n /**\n * The number of octaves that the filter will sweep above the baseFrequency.\n */\n get octaves() {\n return this._octaves;\n }\n set octaves(octaves) {\n this._octaves = octaves;\n this._setSweepRange();\n }\n /**\n * The follower\'s smoothing time\n */\n get follower() {\n return this._follower.smoothing;\n }\n set follower(follower) {\n this._follower.smoothing = follower;\n }\n /**\n * The base frequency from which the sweep will start from.\n */\n get baseFrequency() {\n return this._baseFrequency;\n }\n set baseFrequency(baseFreq) {\n this._baseFrequency = this.toFrequency(baseFreq);\n this._setSweepRange();\n }\n /**\n * The sensitivity to control how responsive to the input signal the filter is.\n */\n get sensitivity() {\n return gainToDb(1 / this._inputBoost.gain.value);\n }\n set sensitivity(sensitivity) {\n this._inputBoost.gain.value = 1 / dbToGain(sensitivity);\n }\n /**\n * sets the sweep range of the scaler\n */\n _setSweepRange() {\n this._sweepRange.min = this._baseFrequency;\n this._sweepRange.max = Math.min(this._baseFrequency * Math.pow(2, this._octaves), this.context.sampleRate / 2);\n }\n dispose() {\n super.dispose();\n this._follower.dispose();\n this._sweepRange.dispose();\n this._bandpass.dispose();\n 
this._peaking.dispose();\n this._inputBoost.dispose();\n return this;\n }\n}\n//# sourceMappingURL=AutoWah.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/BitCrusher.worklet.js\n\n\nconst BitCrusher_worklet_workletName = "bit-crusher";\nconst bitCrusherWorklet = /* javascript */ `\n\tclass BitCrusherWorklet extends SingleIOProcessor {\n\n\t\tstatic get parameterDescriptors() {\n\t\t\treturn [{\n\t\t\t\tname: "bits",\n\t\t\t\tdefaultValue: 12,\n\t\t\t\tminValue: 1,\n\t\t\t\tmaxValue: 16,\n\t\t\t\tautomationRate: \'k-rate\'\n\t\t\t}];\n\t\t}\n\n\t\tgenerate(input, _channel, parameters) {\n\t\t\tconst step = Math.pow(0.5, parameters.bits - 1);\n\t\t\tconst val = step * Math.floor(input / step + 0.5);\n\t\t\treturn val;\n\t\t}\n\t}\n`;\nregisterProcessor(BitCrusher_worklet_workletName, bitCrusherWorklet);\n//# sourceMappingURL=BitCrusher.worklet.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/BitCrusher.js\n\n\n\n\n\n\n\n/**\n * BitCrusher down-samples the incoming signal to a different bit depth.\n * Lowering the bit depth of the signal creates distortion. 
Read more about BitCrushing\n * on [Wikipedia](https://en.wikipedia.org/wiki/Bitcrusher).\n * @example\n * // initialize crusher and route a synth through it\n * const crusher = new Tone.BitCrusher(4).toDestination();\n * const synth = new Tone.Synth().connect(crusher);\n * synth.triggerAttackRelease("C2", 2);\n *\n * @category Effect\n */\nclass BitCrusher extends (/* unused pure expression or super */ null && (Effect)) {\n constructor() {\n super(optionsFromArguments(BitCrusher.getDefaults(), arguments, ["bits"]));\n this.name = "BitCrusher";\n const options = optionsFromArguments(BitCrusher.getDefaults(), arguments, ["bits"]);\n this._bitCrusherWorklet = new BitCrusherWorklet({\n context: this.context,\n bits: options.bits,\n });\n // connect it up\n this.connectEffect(this._bitCrusherWorklet);\n this.bits = this._bitCrusherWorklet.bits;\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n bits: 4,\n });\n }\n dispose() {\n super.dispose();\n this._bitCrusherWorklet.dispose();\n return this;\n }\n}\n/**\n * Internal class which creates an AudioWorklet to do the bit crushing\n */\nclass BitCrusherWorklet extends (/* unused pure expression or super */ null && (ToneAudioWorklet)) {\n constructor() {\n super(optionsFromArguments(BitCrusherWorklet.getDefaults(), arguments));\n this.name = "BitCrusherWorklet";\n const options = optionsFromArguments(BitCrusherWorklet.getDefaults(), arguments);\n this.input = new Gain({ context: this.context });\n this.output = new Gain({ context: this.context });\n this.bits = new Param({\n context: this.context,\n value: options.bits,\n units: "positive",\n minValue: 1,\n maxValue: 16,\n param: this._dummyParam,\n swappable: true,\n });\n }\n static getDefaults() {\n return Object.assign(ToneAudioWorklet.getDefaults(), {\n bits: 12,\n });\n }\n _audioWorkletName() {\n return workletName;\n }\n onReady(node) {\n connectSeries(this.input, node, this.output);\n const bits = node.parameters.get("bits");\n 
this.bits.setParam(bits);\n }\n dispose() {\n super.dispose();\n this.input.dispose();\n this.output.dispose();\n this.bits.dispose();\n return this;\n }\n}\n//# sourceMappingURL=BitCrusher.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Chebyshev.js\n\n\n\n/**\n * Chebyshev is a waveshaper which is good\n * for making different types of distortion sounds.\n * Note that odd orders sound very different from even ones,\n * and order = 1 is no change.\n * Read more at [music.columbia.edu](http://music.columbia.edu/cmc/musicandcomputers/chapter4/04_06.php).\n * @example\n * // create a new cheby\n * const cheby = new Tone.Chebyshev(50).toDestination();\n * // create a monosynth connected to our cheby\n * const synth = new Tone.MonoSynth().connect(cheby);\n * synth.triggerAttackRelease("C2", 0.4);\n * @category Effect\n */\nclass Chebyshev extends (/* unused pure expression or super */ null && (Effect)) {\n constructor() {\n super(optionsFromArguments(Chebyshev.getDefaults(), arguments, ["order"]));\n this.name = "Chebyshev";\n const options = optionsFromArguments(Chebyshev.getDefaults(), arguments, ["order"]);\n this._shaper = new WaveShaper({\n context: this.context,\n length: 4096\n });\n this._order = options.order;\n this.connectEffect(this._shaper);\n this.order = options.order;\n this.oversample = options.oversample;\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n order: 1,\n oversample: "none"\n });\n }\n /**\n * get the coefficient for that degree\n * @param x the x value\n * @param degree\n * @param memo memoize the computed value. 
this speeds up computation greatly.\n */\n _getCoefficient(x, degree, memo) {\n if (memo.has(degree)) {\n return memo.get(degree);\n }\n else if (degree === 0) {\n memo.set(degree, 0);\n }\n else if (degree === 1) {\n memo.set(degree, x);\n }\n else {\n memo.set(degree, 2 * x * this._getCoefficient(x, degree - 1, memo) - this._getCoefficient(x, degree - 2, memo));\n }\n return memo.get(degree);\n }\n /**\n * The order of the Chebyshev polynomial which creates the equation which is applied to the incoming\n * signal through a Tone.WaveShaper. The equations are in the form:\n * ```\n * order 2: 2x^2 + 1\n * order 3: 4x^3 + 3x\n * ```\n * @min 1\n * @max 100\n */\n get order() {\n return this._order;\n }\n set order(order) {\n this._order = order;\n this._shaper.setMap((x => {\n return this._getCoefficient(x, order, new Map());\n }));\n }\n /**\n * The oversampling of the effect. Can either be "none", "2x" or "4x".\n */\n get oversample() {\n return this._shaper.oversample;\n }\n set oversample(oversampling) {\n this._shaper.oversample = oversampling;\n }\n dispose() {\n super.dispose();\n this._shaper.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Chebyshev.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/channel/Split.js\n\n\n/**\n * Split splits an incoming signal into the number of given channels.\n *\n * @example\n * const split = new Tone.Split();\n * // stereoSignal.connect(split);\n * @category Component\n */\nclass Split_Split extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor() {\n super(optionsFromArguments(Split_Split.getDefaults(), arguments, ["channels"]));\n this.name = "Split";\n const options = optionsFromArguments(Split_Split.getDefaults(), arguments, ["channels"]);\n this._splitter = this.input = this.output = this.context.createChannelSplitter(options.channels);\n this._internalChannels = [this._splitter];\n }\n static getDefaults() {\n return 
Object.assign(ToneAudioNode.getDefaults(), {\n channels: 2,\n });\n }\n dispose() {\n super.dispose();\n this._splitter.disconnect();\n return this;\n }\n}\n//# sourceMappingURL=Split.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/channel/Merge.js\n\n\n/**\n * Merge brings multiple mono input channels into a single multichannel output channel.\n *\n * @example\n * const merge = new Tone.Merge().toDestination();\n * // routing a sine tone in the left channel\n * const osc = new Tone.Oscillator().connect(merge, 0, 0).start();\n * // and noise in the right channel\n * const noise = new Tone.Noise().connect(merge, 0, 1).start();;\n * @category Component\n */\nclass Merge_Merge extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor() {\n super(optionsFromArguments(Merge_Merge.getDefaults(), arguments, ["channels"]));\n this.name = "Merge";\n const options = optionsFromArguments(Merge_Merge.getDefaults(), arguments, ["channels"]);\n this._merger = this.output = this.input = this.context.createChannelMerger(options.channels);\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n channels: 2,\n });\n }\n dispose() {\n super.dispose();\n this._merger.disconnect();\n return this;\n }\n}\n//# sourceMappingURL=Merge.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/StereoEffect.js\n\n\n\n\n\n\n/**\n * Base class for Stereo effects.\n */\nclass StereoEffect_StereoEffect extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n constructor(options) {\n super(options);\n this.name = "StereoEffect";\n this.input = new Gain({ context: this.context });\n // force mono sources to be stereo\n this.input.channelCount = 2;\n this.input.channelCountMode = "explicit";\n this._dryWet = this.output = new CrossFade({\n context: this.context,\n fade: options.wet\n });\n this.wet = this._dryWet.fade;\n this._split = new Split({ context: this.context, channels: 2 });\n 
this._merge = new Merge({ context: this.context, channels: 2 });\n // connections\n this.input.connect(this._split);\n // dry wet connections\n this.input.connect(this._dryWet.a);\n this._merge.connect(this._dryWet.b);\n readOnly(this, ["wet"]);\n }\n /**\n * Connect the left part of the effect\n */\n connectEffectLeft(...nodes) {\n this._split.connect(nodes[0], 0, 0);\n connectSeries(...nodes);\n connect(nodes[nodes.length - 1], this._merge, 0, 0);\n }\n /**\n * Connect the right part of the effect\n */\n connectEffectRight(...nodes) {\n this._split.connect(nodes[0], 1, 0);\n connectSeries(...nodes);\n connect(nodes[nodes.length - 1], this._merge, 0, 1);\n }\n static getDefaults() {\n return Object.assign(ToneAudioNode.getDefaults(), {\n wet: 1,\n });\n }\n dispose() {\n super.dispose();\n this._dryWet.dispose();\n this._split.dispose();\n this._merge.dispose();\n return this;\n }\n}\n//# sourceMappingURL=StereoEffect.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/StereoFeedbackEffect.js\n\n\n\n\n\n\n/**\n * Base class for stereo feedback effects where the effectReturn is fed back into the same channel.\n */\nclass StereoFeedbackEffect_StereoFeedbackEffect extends (/* unused pure expression or super */ null && (StereoEffect)) {\n constructor(options) {\n super(options);\n this.feedback = new Signal({\n context: this.context,\n value: options.feedback,\n units: "normalRange"\n });\n this._feedbackL = new Gain({ context: this.context });\n this._feedbackR = new Gain({ context: this.context });\n this._feedbackSplit = new Split({ context: this.context, channels: 2 });\n this._feedbackMerge = new Merge({ context: this.context, channels: 2 });\n this._merge.connect(this._feedbackSplit);\n this._feedbackMerge.connect(this._split);\n // the left output connected to the left input\n this._feedbackSplit.connect(this._feedbackL, 0, 0);\n this._feedbackL.connect(this._feedbackMerge, 0, 0);\n // the right output connected to the right input\n 
this._feedbackSplit.connect(this._feedbackR, 1, 0);\n this._feedbackR.connect(this._feedbackMerge, 0, 1);\n // the feedback control\n this.feedback.fan(this._feedbackL.gain, this._feedbackR.gain);\n readOnly(this, ["feedback"]);\n }\n static getDefaults() {\n return Object.assign(StereoEffect.getDefaults(), {\n feedback: 0.5,\n });\n }\n dispose() {\n super.dispose();\n this.feedback.dispose();\n this._feedbackL.dispose();\n this._feedbackR.dispose();\n this._feedbackSplit.dispose();\n this._feedbackMerge.dispose();\n return this;\n }\n}\n//# sourceMappingURL=StereoFeedbackEffect.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Chorus.js\n\n\n\n\n\n/**\n * Chorus is a stereo chorus effect composed of a left and right delay with an [[LFO]] applied to the delayTime of each channel.\n * When [[feedback]] is set to a value larger than 0, you also get Flanger-type effects.\n * Inspiration from [Tuna.js](https://github.com/Dinahmoe/tuna/blob/master/tuna.js).\n * Read more on the chorus effect on [SoundOnSound](http://www.soundonsound.com/sos/jun04/articles/synthsecrets.htm).\n *\n * @example\n * const chorus = new Tone.Chorus(4, 2.5, 0.5).toDestination().start();\n * const synth = new Tone.PolySynth().connect(chorus);\n * synth.triggerAttackRelease(["C3", "E3", "G3"], "8n");\n *\n * @category Effect\n */\nclass Chorus extends (/* unused pure expression or super */ null && (StereoFeedbackEffect)) {\n constructor() {\n super(optionsFromArguments(Chorus.getDefaults(), arguments, ["frequency", "delayTime", "depth"]));\n this.name = "Chorus";\n const options = optionsFromArguments(Chorus.getDefaults(), arguments, ["frequency", "delayTime", "depth"]);\n this._depth = options.depth;\n this._delayTime = options.delayTime / 1000;\n this._lfoL = new LFO({\n context: this.context,\n frequency: options.frequency,\n min: 0,\n max: 1,\n });\n this._lfoR = new LFO({\n context: this.context,\n frequency: options.frequency,\n min: 0,\n max: 1,\n phase: 180\n });\n 
this._delayNodeL = new Delay({ context: this.context });\n this._delayNodeR = new Delay({ context: this.context });\n this.frequency = this._lfoL.frequency;\n readOnly(this, ["frequency"]);\n // have one LFO frequency control the other\n this._lfoL.frequency.connect(this._lfoR.frequency);\n // connections\n this.connectEffectLeft(this._delayNodeL);\n this.connectEffectRight(this._delayNodeR);\n // lfo setup\n this._lfoL.connect(this._delayNodeL.delayTime);\n this._lfoR.connect(this._delayNodeR.delayTime);\n // set the initial values\n this.depth = this._depth;\n this.type = options.type;\n this.spread = options.spread;\n }\n static getDefaults() {\n return Object.assign(StereoFeedbackEffect.getDefaults(), {\n frequency: 1.5,\n delayTime: 3.5,\n depth: 0.7,\n type: "sine",\n spread: 180,\n feedback: 0,\n wet: 0.5,\n });\n }\n /**\n * The depth of the effect. A depth of 1 makes the delayTime\n * modulate between 0 and 2*delayTime (centered around the delayTime).\n */\n get depth() {\n return this._depth;\n }\n set depth(depth) {\n this._depth = depth;\n const deviation = this._delayTime * depth;\n this._lfoL.min = Math.max(this._delayTime - deviation, 0);\n this._lfoL.max = this._delayTime + deviation;\n this._lfoR.min = Math.max(this._delayTime - deviation, 0);\n this._lfoR.max = this._delayTime + deviation;\n }\n /**\n * The delayTime in milliseconds of the chorus. A larger delayTime\n * will give a more pronounced effect. Nominal range a delayTime\n * is between 2 and 20ms.\n */\n get delayTime() {\n return this._delayTime * 1000;\n }\n set delayTime(delayTime) {\n this._delayTime = delayTime / 1000;\n this.depth = this._depth;\n }\n /**\n * The oscillator type of the LFO.\n */\n get type() {\n return this._lfoL.type;\n }\n set type(type) {\n this._lfoL.type = type;\n this._lfoR.type = type;\n }\n /**\n * Amount of stereo spread. 
When set to 0, both LFO\'s will be panned centrally.\n * When set to 180, LFO\'s will be panned hard left and right respectively.\n */\n get spread() {\n return this._lfoR.phase - this._lfoL.phase;\n }\n set spread(spread) {\n this._lfoL.phase = 90 - (spread / 2);\n this._lfoR.phase = (spread / 2) + 90;\n }\n /**\n * Start the effect.\n */\n start(time) {\n this._lfoL.start(time);\n this._lfoR.start(time);\n return this;\n }\n /**\n * Stop the lfo\n */\n stop(time) {\n this._lfoL.stop(time);\n this._lfoR.stop(time);\n return this;\n }\n /**\n * Sync the filter to the transport. See [[LFO.sync]]\n */\n sync() {\n this._lfoL.sync();\n this._lfoR.sync();\n return this;\n }\n /**\n * Unsync the filter from the transport.\n */\n unsync() {\n this._lfoL.unsync();\n this._lfoR.unsync();\n return this;\n }\n dispose() {\n super.dispose();\n this._lfoL.dispose();\n this._lfoR.dispose();\n this._delayNodeL.dispose();\n this._delayNodeR.dispose();\n this.frequency.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Chorus.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Distortion.js\n\n\n\n/**\n * A simple distortion effect using Tone.WaveShaper.\n * Algorithm from [this stackoverflow answer](http://stackoverflow.com/a/22313408).\n *\n * @example\n * const dist = new Tone.Distortion(0.8).toDestination();\n * const fm = new Tone.FMSynth().connect(dist);\n * fm.triggerAttackRelease("A1", "8n");\n * @category Effect\n */\nclass Distortion extends (/* unused pure expression or super */ null && (Effect)) {\n constructor() {\n super(optionsFromArguments(Distortion.getDefaults(), arguments, ["distortion"]));\n this.name = "Distortion";\n const options = optionsFromArguments(Distortion.getDefaults(), arguments, ["distortion"]);\n this._shaper = new WaveShaper({\n context: this.context,\n length: 4096,\n });\n this._distortion = options.distortion;\n this.connectEffect(this._shaper);\n this.distortion = options.distortion;\n this.oversample = 
options.oversample;\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n distortion: 0.4,\n oversample: "none",\n });\n }\n /**\n * The amount of distortion. Nominal range is between 0 and 1.\n */\n get distortion() {\n return this._distortion;\n }\n set distortion(amount) {\n this._distortion = amount;\n const k = amount * 100;\n const deg = Math.PI / 180;\n this._shaper.setMap((x) => {\n if (Math.abs(x) < 0.001) {\n // should output 0 when input is 0\n return 0;\n }\n else {\n return (3 + k) * x * 20 * deg / (Math.PI + k * Math.abs(x));\n }\n });\n }\n /**\n * The oversampling of the effect. Can either be "none", "2x" or "4x".\n */\n get oversample() {\n return this._shaper.oversample;\n }\n set oversample(oversampling) {\n this._shaper.oversample = oversampling;\n }\n dispose() {\n super.dispose();\n this._shaper.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Distortion.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/FeedbackEffect.js\n\n\n\n/**\n * FeedbackEffect provides a loop between an audio source and its own output.\n * This is a base-class for feedback effects.\n */\nclass FeedbackEffect_FeedbackEffect extends (/* unused pure expression or super */ null && (Effect)) {\n constructor(options) {\n super(options);\n this.name = "FeedbackEffect";\n this._feedbackGain = new Gain({\n context: this.context,\n gain: options.feedback,\n units: "normalRange",\n });\n this.feedback = this._feedbackGain.gain;\n readOnly(this, "feedback");\n // the feedback loop\n this.effectReturn.chain(this._feedbackGain, this.effectSend);\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n feedback: 0.125,\n });\n }\n dispose() {\n super.dispose();\n this._feedbackGain.dispose();\n this.feedback.dispose();\n return this;\n }\n}\n//# sourceMappingURL=FeedbackEffect.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/FeedbackDelay.js\n\n\n\n\n/**\n * FeedbackDelay is a DelayNode in 
which part of output signal is fed back into the delay.\n *\n * @param delayTime The delay applied to the incoming signal.\n * @param feedback The amount of the effected signal which is fed back through the delay.\n * @example\n * const feedbackDelay = new Tone.FeedbackDelay("8n", 0.5).toDestination();\n * const tom = new Tone.MembraneSynth({\n * \toctaves: 4,\n * \tpitchDecay: 0.1\n * }).connect(feedbackDelay);\n * tom.triggerAttackRelease("A2", "32n");\n * @category Effect\n */\nclass FeedbackDelay extends (/* unused pure expression or super */ null && (FeedbackEffect)) {\n constructor() {\n super(optionsFromArguments(FeedbackDelay.getDefaults(), arguments, ["delayTime", "feedback"]));\n this.name = "FeedbackDelay";\n const options = optionsFromArguments(FeedbackDelay.getDefaults(), arguments, ["delayTime", "feedback"]);\n this._delayNode = new Delay({\n context: this.context,\n delayTime: options.delayTime,\n maxDelay: options.maxDelay,\n });\n this.delayTime = this._delayNode.delayTime;\n // connect it up\n this.connectEffect(this._delayNode);\n readOnly(this, "delayTime");\n }\n static getDefaults() {\n return Object.assign(FeedbackEffect.getDefaults(), {\n delayTime: 0.25,\n maxDelay: 1,\n });\n }\n dispose() {\n super.dispose();\n this._delayNode.dispose();\n this.delayTime.dispose();\n return this;\n }\n}\n//# sourceMappingURL=FeedbackDelay.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/filter/PhaseShiftAllpass.js\n\n\n/**\n * PhaseShiftAllpass is an very efficient implementation of a Hilbert Transform\n * using two Allpass filter banks whose outputs have a phase difference of 90°.\n * Here the `offset90` phase is offset by +90° in relation to `output`.\n * Coefficients and structure was developed by Olli Niemitalo.\n * For more details see: http://yehar.com/blog/?p=368\n * @category Component\n */\nclass PhaseShiftAllpass_PhaseShiftAllpass extends (/* unused pure expression or super */ null && (ToneAudioNode)) {\n 
constructor(options) {\n super(options);\n this.name = "PhaseShiftAllpass";\n this.input = new Gain({ context: this.context });\n /**\n * The phase shifted output\n */\n this.output = new Gain({ context: this.context });\n /**\n * The PhaseShifted allpass output\n */\n this.offset90 = new Gain({ context: this.context });\n const allpassBank1Values = [0.6923878, 0.9360654322959, 0.9882295226860, 0.9987488452737];\n const allpassBank2Values = [0.4021921162426, 0.8561710882420, 0.9722909545651, 0.9952884791278];\n this._bank0 = this._createAllPassFilterBank(allpassBank1Values);\n this._bank1 = this._createAllPassFilterBank(allpassBank2Values);\n this._oneSampleDelay = this.context.createIIRFilter([0.0, 1.0], [1.0, 0.0]);\n // connect Allpass filter banks\n connectSeries(this.input, ...this._bank0, this._oneSampleDelay, this.output);\n connectSeries(this.input, ...this._bank1, this.offset90);\n }\n /**\n * Create all of the IIR filters from an array of values using the coefficient calculation.\n */\n _createAllPassFilterBank(bankValues) {\n const nodes = bankValues.map(value => {\n const coefficients = [[value * value, 0, -1], [1, 0, -(value * value)]];\n return this.context.createIIRFilter(coefficients[0], coefficients[1]);\n });\n return nodes;\n }\n dispose() {\n super.dispose();\n this.input.dispose();\n this.output.dispose();\n this.offset90.dispose();\n this._bank0.forEach(f => f.disconnect());\n this._bank1.forEach(f => f.disconnect());\n this._oneSampleDelay.disconnect();\n return this;\n }\n}\n//# sourceMappingURL=PhaseShiftAllpass.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/FrequencyShifter.js\n\n\n\n\n\n\n\n\n\n/**\n * FrequencyShifter can be used to shift all frequencies of a signal by a fixed amount.\n * The amount can be changed at audio rate and the effect is applied in real time.\n * The frequency shifting is implemented with a technique called single side band modulation using a ring modulator.\n * Note: Contrary to pitch 
shifting, all frequencies are shifted by the same amount,\n * destroying the harmonic relationship between them. This leads to the classic ring modulator timbre distortion.\n * The algorithm will produces some aliasing towards the high end, especially if your source material\n * contains a lot of high frequencies. Unfortunatelly the webaudio API does not support resampling\n * buffers in real time, so it is not possible to fix it properly. Depending on the use case it might\n * be an option to low pass filter your input before frequency shifting it to get ride of the aliasing.\n * You can find a very detailed description of the algorithm here: https://larzeitlin.github.io/RMFS/\n *\n * @example\n * const input = new Tone.Oscillator(230, "sawtooth").start();\n * const shift = new Tone.FrequencyShifter(42).toDestination();\n * input.connect(shift);\n * @category Effect\n */\nclass FrequencyShifter extends (/* unused pure expression or super */ null && (Effect)) {\n constructor() {\n super(optionsFromArguments(FrequencyShifter.getDefaults(), arguments, ["frequency"]));\n this.name = "FrequencyShifter";\n const options = optionsFromArguments(FrequencyShifter.getDefaults(), arguments, ["frequency"]);\n this.frequency = new Signal({\n context: this.context,\n units: "frequency",\n value: options.frequency,\n minValue: -this.context.sampleRate / 2,\n maxValue: this.context.sampleRate / 2,\n });\n this._sine = new ToneOscillatorNode({\n context: this.context,\n type: "sine",\n });\n this._cosine = new Oscillator({\n context: this.context,\n phase: -90,\n type: "sine",\n });\n this._sineMultiply = new Multiply({ context: this.context });\n this._cosineMultiply = new Multiply({ context: this.context });\n this._negate = new Negate({ context: this.context });\n this._add = new Add({ context: this.context });\n this._phaseShifter = new PhaseShiftAllpass({ context: this.context });\n this.effectSend.connect(this._phaseShifter);\n // connect the carrier frequency signal to the 
two oscillators\n this.frequency.fan(this._sine.frequency, this._cosine.frequency);\n this._phaseShifter.offset90.connect(this._cosineMultiply);\n this._cosine.connect(this._cosineMultiply.factor);\n this._phaseShifter.connect(this._sineMultiply);\n this._sine.connect(this._sineMultiply.factor);\n this._sineMultiply.connect(this._negate);\n this._cosineMultiply.connect(this._add);\n this._negate.connect(this._add.addend);\n this._add.connect(this.effectReturn);\n // start the oscillators at the same time\n const now = this.immediate();\n this._sine.start(now);\n this._cosine.start(now);\n }\n static getDefaults() {\n return Object.assign(Effect.getDefaults(), {\n frequency: 0,\n });\n }\n dispose() {\n super.dispose();\n this.frequency.dispose();\n this._add.dispose();\n this._cosine.dispose();\n this._cosineMultiply.dispose();\n this._negate.dispose();\n this._phaseShifter.dispose();\n this._sine.dispose();\n this._sineMultiply.dispose();\n return this;\n }\n}\n//# sourceMappingURL=FrequencyShifter.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Freeverb.js\n\n\n\n\n\n/**\n * An array of comb filter delay values from Freeverb implementation\n */\nconst combFilterTunings = (/* unused pure expression or super */ null && ([1557 / 44100, 1617 / 44100, 1491 / 44100, 1422 / 44100, 1277 / 44100, 1356 / 44100, 1188 / 44100, 1116 / 44100]));\n/**\n * An array of allpass filter frequency values from Freeverb implementation\n */\nconst allpassFilterFrequencies = (/* unused pure expression or super */ null && ([225, 556, 441, 341]));\n/**\n * Freeverb is a reverb based on [Freeverb](https://ccrma.stanford.edu/~jos/pasp/Freeverb.html).\n * Read more on reverb on [Sound On Sound](https://web.archive.org/web/20160404083902/http://www.soundonsound.com:80/sos/feb01/articles/synthsecrets.asp).\n * Freeverb is now implemented with an AudioWorkletNode which may result on performance degradation on some platforms. 
Consider using [[Reverb]].\n * @example\n * const freeverb = new Tone.Freeverb().toDestination();\n * freeverb.dampening = 1000;\n * // routing synth through the reverb\n * const synth = new Tone.NoiseSynth().connect(freeverb);\n * synth.triggerAttackRelease(0.05);\n * @category Effect\n */\nclass Freeverb extends (/* unused pure expression or super */ null && (StereoEffect)) {\n constructor() {\n super(optionsFromArguments(Freeverb.getDefaults(), arguments, ["roomSize", "dampening"]));\n this.name = "Freeverb";\n /**\n * the comb filters\n */\n this._combFilters = [];\n /**\n * the allpass filters on the left\n */\n this._allpassFiltersL = [];\n /**\n * the allpass filters on the right\n */\n this._allpassFiltersR = [];\n const options = optionsFromArguments(Freeverb.getDefaults(), arguments, ["roomSize", "dampening"]);\n this.roomSize = new Signal({\n context: this.context,\n value: options.roomSize,\n units: "normalRange",\n });\n // make the allpass filters on the right\n this._allpassFiltersL = allpassFilterFrequencies.map(freq => {\n const allpassL = this.context.createBiquadFilter();\n allpassL.type = "allpass";\n allpassL.frequency.value = freq;\n return allpassL;\n });\n // make the allpass filters on the left\n this._allpassFiltersR = allpassFilterFrequencies.map(freq => {\n const allpassR = this.context.createBiquadFilter();\n allpassR.type = "allpass";\n allpassR.frequency.value = freq;\n return allpassR;\n });\n // make the comb filters\n this._combFilters = combFilterTunings.map((delayTime, index) => {\n const lfpf = new LowpassCombFilter({\n context: this.context,\n dampening: options.dampening,\n delayTime,\n });\n if (index < combFilterTunings.length / 2) {\n this.connectEffectLeft(lfpf, ...this._allpassFiltersL);\n }\n else {\n this.connectEffectRight(lfpf, ...this._allpassFiltersR);\n }\n this.roomSize.connect(lfpf.resonance);\n return lfpf;\n });\n readOnly(this, ["roomSize"]);\n }\n static getDefaults() {\n return 
Object.assign(StereoEffect.getDefaults(), {\n roomSize: 0.7,\n dampening: 3000\n });\n }\n /**\n * The amount of dampening of the reverberant signal.\n */\n get dampening() {\n return this._combFilters[0].dampening;\n }\n set dampening(d) {\n this._combFilters.forEach(c => c.dampening = d);\n }\n dispose() {\n super.dispose();\n this._allpassFiltersL.forEach(al => al.disconnect());\n this._allpassFiltersR.forEach(ar => ar.disconnect());\n this._combFilters.forEach(cf => cf.dispose());\n this.roomSize.dispose();\n return this;\n }\n}\n//# sourceMappingURL=Freeverb.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/JCReverb.js\n\n\n\n\n\n\n/**\n * an array of the comb filter delay time values\n */\nconst combFilterDelayTimes = (/* unused pure expression or super */ null && ([1687 / 25000, 1601 / 25000, 2053 / 25000, 2251 / 25000]));\n/**\n * the resonances of each of the comb filters\n */\nconst combFilterResonances = (/* unused pure expression or super */ null && ([0.773, 0.802, 0.753, 0.733]));\n/**\n * the allpass filter frequencies\n */\nconst allpassFilterFreqs = (/* unused pure expression or super */ null && ([347, 113, 37]));\n/**\n * JCReverb is a simple [Schroeder Reverberator](https://ccrma.stanford.edu/~jos/pasp/Schroeder_Reverberators.html)\n * tuned by John Chowning in 1970.\n * It is made up of three allpass filters and four [[FeedbackCombFilter]].\n * JCReverb is now implemented with an AudioWorkletNode which may result on performance degradation on some platforms. 
Consider using [[Reverb]].\n * @example\n * const reverb = new Tone.JCReverb(0.4).toDestination();\n * const delay = new Tone.FeedbackDelay(0.5);\n * // connecting the synth to reverb through delay\n * const synth = new Tone.DuoSynth().chain(delay, reverb);\n * synth.triggerAttackRelease("A4", "8n");\n *\n * @category Effect\n */\nclass JCReverb extends (/* unused pure expression or super */ null && (StereoEffect)) {\n constructor() {\n super(optionsFromArguments(JCReverb.getDefaults(), arguments, ["roomSize"]));\n this.name = "JCReverb";\n /**\n * a series of allpass filters\n */\n this._allpassFilters = [];\n /**\n * parallel feedback comb filters\n */\n this._feedbackCombFilters = [];\n const options = optionsFromArguments(JCReverb.getDefaults(), arguments, ["roomSize"]);\n this.roomSize = new Signal({\n context: this.context,\n value: options.roomSize,\n units: "normalRange",\n });\n this._scaleRoomSize = new Scale({\n context: this.context,\n min: -0.733,\n max: 0.197,\n });\n // make the allpass filters\n this._allpassFilters = allpassFilterFreqs.map(freq => {\n const allpass = this.context.createBiquadFilter();\n allpass.type = "allpass";\n allpass.frequency.value = freq;\n return allpass;\n });\n // and the comb filters\n this._feedbackCombFilters = combFilterDelayTimes.map((delayTime, index) => {\n const fbcf = new FeedbackCombFilter({\n context: this.context,\n delayTime,\n });\n this._scaleRoomSize.connect(fbcf.resonance);\n fbcf.resonance.value = combFilterResonances[index];\n if (index < combFilterDelayTimes.length / 2) {\n this.connectEffectLeft(...this._allpassFilters, fbcf);\n }\n else {\n this.connectEffectRight(...this._allpassFilters, fbcf);\n }\n return fbcf;\n });\n // chain the allpass filters together\n this.roomSize.connect(this._scaleRoomSize);\n readOnly(this, ["roomSize"]);\n }\n static getDefaults() {\n return Object.assign(StereoEffect.getDefaults(), {\n roomSize: 0.5,\n });\n }\n dispose() {\n super.dispose();\n 
this._allpassFilters.forEach(apf => apf.disconnect());\n this._feedbackCombFilters.forEach(fbcf => fbcf.dispose());\n this.roomSize.dispose();\n this._scaleRoomSize.dispose();\n return this;\n }\n}\n//# sourceMappingURL=JCReverb.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/StereoXFeedbackEffect.js\n\n\n/**\n * Just like a [[StereoFeedbackEffect]], but the feedback is routed from left to right\n * and right to left instead of on the same channel.\n * ```\n * +--------------------------------+ feedbackL <-----------------------------------+\n * | |\n * +--\x3e +-----\x3e +----\x3e +-----+\n * feedbackMerge +--\x3e split (EFFECT) merge +--\x3e feedbackSplit | |\n * +--\x3e +-----\x3e +----\x3e +---+ |\n * | |\n * +--------------------------------+ feedbackR <-------------------------------------+\n * ```\n */\nclass StereoXFeedbackEffect_StereoXFeedbackEffect extends (/* unused pure expression or super */ null && (StereoFeedbackEffect)) {\n constructor(options) {\n super(options);\n // the left output connected to the right input\n this._feedbackL.disconnect();\n this._feedbackL.connect(this._feedbackMerge, 0, 1);\n // the left output connected to the right input\n this._feedbackR.disconnect();\n this._feedbackR.connect(this._feedbackMerge, 0, 0);\n readOnly(this, ["feedback"]);\n }\n}\n//# sourceMappingURL=StereoXFeedbackEffect.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/PingPongDelay.js\n\n\n\n\n\n/**\n * PingPongDelay is a feedback delay effect where the echo is heard\n * first in one channel and next in the opposite channel. In a stereo\n * system these are the right and left channels.\n * PingPongDelay in more simplified terms is two Tone.FeedbackDelays\n * with independent delay values. 
Each delay is routed to one channel\n * (left or right), and the channel triggered second will always\n * trigger at the same interval after the first.\n * @example\n * const pingPong = new Tone.PingPongDelay("4n", 0.2).toDestination();\n * const drum = new Tone.MembraneSynth().connect(pingPong);\n * drum.triggerAttackRelease("C4", "32n");\n * @category Effect\n */\nclass PingPongDelay extends (/* unused pure expression or super */ null && (StereoXFeedbackEffect)) {\n constructor() {\n super(optionsFromArguments(PingPongDelay.getDefaults(), arguments, ["delayTime", "feedback"]));\n this.name = "PingPongDelay";\n const options = optionsFromArguments(PingPongDelay.getDefaults(), arguments, ["delayTime", "feedback"]);\n this._leftDelay = new Delay({\n context: this.context,\n maxDelay: options.maxDelay,\n });\n this._rightDelay = new Delay({\n context: this.context,\n maxDelay: options.maxDelay\n });\n this._rightPreDelay = new Delay({\n context: this.context,\n maxDelay: options.maxDelay\n });\n this.delayTime = new Signal({\n context: this.context,\n units: "time",\n value: options.delayTime,\n });\n // connect it up\n this.connectEffectLeft(this._leftDelay);\n this.connectEffectRight(this._rightPreDelay, this._rightDelay);\n this.delayTime.fan(this._leftDelay.delayTime, this._rightDelay.delayTime, this._rightPreDelay.delayTime);\n // rearranged the feedback to be after the rightPreDelay\n this._feedbackL.disconnect();\n this._feedbackL.connect(this._rightDelay);\n readOnly(this, ["delayTime"]);\n }\n static getDefaults() {\n return Object.assign(StereoXFeedbackEffect.getDefaults(), {\n delayTime: 0.25,\n maxDelay: 1\n });\n }\n dispose() {\n super.dispose();\n this._leftDelay.dispose();\n this._rightDelay.dispose();\n this._rightPreDelay.dispose();\n this.delayTime.dispose();\n return this;\n }\n}\n//# sourceMappingURL=PingPongDelay.js.map\n;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/PitchShift.js\n\n\n\n\n\n\n\n\n/**\n * PitchShift does 
near-realtime pitch shifting to the incoming signal.
 * The effect is achieved by speeding up or slowing down the delayTime
 * of a DelayNode using a sawtooth wave.
 * Algorithm found in [this pdf](http://dsp-book.narod.ru/soundproc.pdf).
 * Additional reference by [Miller Puckette](http://msp.ucsd.edu/techniques/v0.11/book-html/node115.html).
 * @category Effect
 */
// NOTE(review): the bundler rewrote the superclass to "null && (FeedbackEffect)" —
// webpack's dead-code (tree-shaken) marker. Constructing this class would throw
// (extends null + super() call); presumably it is never instantiated in this bundle.
class PitchShift extends (/* unused pure expression or super */ null && (FeedbackEffect)) {
    constructor() {
        super(optionsFromArguments(PitchShift.getDefaults(), arguments, ["pitch"]));
        this.name = "PitchShift";
        const options = optionsFromArguments(PitchShift.getDefaults(), arguments, ["pitch"]);
        // One rate Signal fans out to both delay-line LFOs and the crossfade LFO,
        // keeping all three modulators locked to the same frequency.
        this._frequency = new Signal({ context: this.context });
        this._delayA = new Delay({
            maxDelay: 1,
            context: this.context
        });
        this._lfoA = new LFO({
            context: this.context,
            min: 0,
            max: 0.1,
            type: "sawtooth"
        }).connect(this._delayA.delayTime);
        this._delayB = new Delay({
            maxDelay: 1,
            context: this.context
        });
        // Second delay line runs 180 degrees out of phase so the crossfade can
        // mask the sawtooth reset discontinuity of whichever line is wrapping.
        this._lfoB = new LFO({
            context: this.context,
            min: 0,
            max: 0.1,
            type: "sawtooth",
            phase: 180
        }).connect(this._delayB.delayTime);
        this._crossFade = new CrossFade({ context: this.context });
        // Triangle at 90 degrees alternates the audible line A <-> B.
        this._crossFadeLFO = new LFO({
            context: this.context,
            min: 0,
            max: 1,
            type: "triangle",
            phase: 90
        }).connect(this._crossFade.fade);
        this._feedbackDelay = new Delay({
            delayTime: options.delayTime,
            context: this.context,
        });
        this.delayTime = this._feedbackDelay.delayTime;
        readOnly(this, "delayTime");
        this._pitch = options.pitch;
        this._windowSize = options.windowSize;
        // connect the two delay lines up
        this._delayA.connect(this._crossFade.a);
        this._delayB.connect(this._crossFade.b);
        // connect the frequency
        this._frequency.fan(this._lfoA.frequency, this._lfoB.frequency, this._crossFadeLFO.frequency);
        // route the input
        this.effectSend.fan(this._delayA, this._delayB);
        this._crossFade.chain(this._feedbackDelay, this.effectReturn);
        // start the LFOs at the same time
        const now = this.now();
        this._lfoA.start(now);
        this._lfoB.start(now);
        this._crossFadeLFO.start(now);
        // set the initial value (runs the windowSize setter, which re-applies pitch)
        this.windowSize = this._windowSize;
    }
    static getDefaults() {
        return Object.assign(FeedbackEffect.getDefaults(), {
            pitch: 0,
            windowSize: 0.1,
            delayTime: 0,
            feedback: 0
        });
    }
    /**
     * Repitch the incoming signal by some interval (measured in semi-tones).
     * @example
     * const pitchShift = new Tone.PitchShift().toDestination();
     * const osc = new Tone.Oscillator().connect(pitchShift).start().toDestination();
     * pitchShift.pitch = -12; // down one octave
     * pitchShift.pitch = 7; // up a fifth
     */
    get pitch() {
        return this._pitch;
    }
    set pitch(interval) {
        this._pitch = interval;
        let factor = 0;
        if (interval < 0) {
            // Downward shift: delay ramps 0 -> windowSize (growing delay lowers pitch).
            this._lfoA.min = 0;
            this._lfoA.max = this._windowSize;
            this._lfoB.min = 0;
            this._lfoB.max = this._windowSize;
            factor = intervalToFrequencyRatio(interval - 1) + 1;
        }
        else {
            // Upward shift: delay ramps windowSize -> 0 (shrinking delay raises pitch).
            this._lfoA.min = this._windowSize;
            this._lfoA.max = 0;
            this._lfoB.min = this._windowSize;
            this._lfoB.max = 0;
            factor = intervalToFrequencyRatio(interval) - 1;
        }
        // LFO rate scales inversely with the window size; 1.2 looks like an
        // empirical tuning constant — TODO confirm against upstream Tone.js.
        this._frequency.value = factor * (1.2 / this._windowSize);
    }
    /**
     * The window size corresponds roughly to the sample length in a looping sampler.
     * Smaller values are desirable for a less noticeable delay time of the pitch shifted
     * signal, but larger values will result in smoother pitch shifting for larger intervals.
     * A nominal range of 0.03 to 0.1 is recommended.
     */
    get windowSize() {
        return this._windowSize;
    }
    set windowSize(size) {
        this._windowSize = this.toSeconds(size);
        // Re-apply the current pitch so LFO ranges and rate pick up the new window.
        this.pitch = this._pitch;
    }
    dispose() {
        super.dispose();
        this._frequency.dispose();
        this._delayA.dispose();
        this._delayB.dispose();
        this._lfoA.dispose();
        this._lfoB.dispose();
        this._crossFade.dispose();
        this._crossFadeLFO.dispose();
        this._feedbackDelay.dispose();
        return this;
    }
}
//# sourceMappingURL=PitchShift.js.map
;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Phaser.js

/**
 * Phaser is a phaser effect. Phasers work by changing the phase
 * of different frequency components of an incoming signal. Read more on
 * [Wikipedia](https://en.wikipedia.org/wiki/Phaser_(effect)).
 * Inspiration for this phaser comes from [Tuna.js](https://github.com/Dinahmoe/tuna/).
 * @example
 * const phaser = new Tone.Phaser({
 * 	frequency: 15,
 * 	octaves: 5,
 * 	baseFrequency: 1000
 * }).toDestination();
 * const synth = new Tone.FMSynth().connect(phaser);
 * synth.triggerAttackRelease("E3", "2n");
 * @category Effect
 */
// NOTE(review): superclass bundled as "null && (StereoEffect)" — webpack dead-code marker.
class Phaser extends (/* unused pure expression or super */ null && (StereoEffect)) {
    constructor() {
        super(optionsFromArguments(Phaser.getDefaults(), arguments, ["frequency", "octaves", "baseFrequency"]));
        this.name = "Phaser";
        const options = optionsFromArguments(Phaser.getDefaults(), arguments, ["frequency", "octaves", "baseFrequency"]);
        this._lfoL = new LFO({
            context: this.context,
            frequency: options.frequency,
            min: 0,
            max: 1
        });
        // Right-channel LFO mirrors the left one 180 degrees out of phase for a stereo sweep.
        this._lfoR = new LFO({
            context: this.context,
            frequency: options.frequency,
            min: 0,
            max: 1,
            phase: 180,
        });
        this._baseFrequency = this.toFrequency(options.baseFrequency);
        this._octaves = options.octaves;
        this.Q = new Signal({
            context: this.context,
            value: options.Q,
            units: "positive",
        });
        this._filtersL = this._makeFilters(options.stages, this._lfoL);
        this._filtersR = this._makeFilters(options.stages, this._lfoR);
        // Expose the left LFO's frequency as the public `frequency` parameter.
        this.frequency = this._lfoL.frequency;
        this.frequency.value = options.frequency;
        // connect them up
        this.connectEffectLeft(...this._filtersL);
        this.connectEffectRight(...this._filtersR);
        // control the frequency with one LFO
        this._lfoL.frequency.connect(this._lfoR.frequency);
        // set the options (setters derive the LFO min/max sweep range)
        this.baseFrequency = options.baseFrequency;
        this.octaves = options.octaves;
        // start the lfo
        this._lfoL.start();
        this._lfoR.start();
        readOnly(this, ["frequency", "Q"]);
    }
    static getDefaults() {
        return Object.assign(StereoEffect.getDefaults(), {
            frequency: 0.5,
            octaves: 3,
            stages: 10,
            Q: 10,
            baseFrequency: 350,
        });
    }
    /**
     * Build a serial chain of `stages` allpass filters whose frequency is
     * modulated by `connectToFreq` and whose Q tracks the shared Q signal.
     */
    _makeFilters(stages, connectToFreq) {
        const filters = [];
        // make all the filters
        for (let i = 0; i < stages; i++) {
            const filter = this.context.createBiquadFilter();
            filter.type = "allpass";
            this.Q.connect(filter.Q);
            connectToFreq.connect(filter.frequency);
            filters.push(filter);
        }
        return filters;
    }
    /**
     * The number of octaves the phase goes above the baseFrequency
     */
    get octaves() {
        return this._octaves;
    }
    set octaves(octaves) {
        this._octaves = octaves;
        // Sweep ceiling = baseFrequency * 2^octaves.
        const max = this._baseFrequency * Math.pow(2, octaves);
        this._lfoL.max = max;
        this._lfoR.max = max;
    }
    /**
     * The base frequency of the filters.
     */
    get baseFrequency() {
        return this._baseFrequency;
    }
    set baseFrequency(freq) {
        this._baseFrequency = this.toFrequency(freq);
        this._lfoL.min = this._baseFrequency;
        this._lfoR.min = this._baseFrequency;
        // Re-run the octaves setter so the LFO max tracks the new base.
        this.octaves = this._octaves;
    }
    dispose() {
        super.dispose();
        this.Q.dispose();
        this._lfoL.dispose();
        this._lfoR.dispose();
        this._filtersL.forEach(f => f.disconnect());
        this._filtersR.forEach(f => f.disconnect());
        // NOTE(review): `this.frequency` aliases `this._lfoL.frequency`, which was
        // already disposed above — presumably a harmless double-dispose upstream; verify.
        this.frequency.dispose();
        return this;
    }
}
//# sourceMappingURL=Phaser.js.map
;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/Reverb.js

/**
 * Simple convolution created with decaying noise.
 * Generates an Impulse Response Buffer
 * with Tone.Offline then feeds the IR into ConvolverNode.
 * The impulse response generation is async, so you have
 * to wait until [[ready]] resolves before it will make a sound.
 *
 * Inspiration from [ReverbGen](https://github.com/adelespinasse/reverbGen).
 * Copyright (c) 2014 
Alan deLespinasse Apache 2.0 License.
 *
 * @category Effect
 */
// NOTE(review): superclass bundled as "null && (Effect)" — webpack dead-code marker;
// this class (and the Mid/Side classes below) appear tree-shaken out of this bundle.
class Reverb extends (/* unused pure expression or super */ null && (Effect)) {
    constructor() {
        super(optionsFromArguments(Reverb.getDefaults(), arguments, ["decay"]));
        this.name = "Reverb";
        /**
         * Convolver node
         */
        this._convolver = this.context.createConvolver();
        /**
         * Resolves when the reverb buffer is generated. Whenever either [[decay]]
         * or [[preDelay]] are set, you have to wait until [[ready]] resolves
         * before the IR is generated with the latest values.
         */
        this.ready = Promise.resolve();
        const options = optionsFromArguments(Reverb.getDefaults(), arguments, ["decay"]);
        this._decay = options.decay;
        this._preDelay = options.preDelay;
        // Fire-and-forget: completion is observable through `this.ready`.
        this.generate();
        this.connectEffect(this._convolver);
    }
    static getDefaults() {
        return Object.assign(Effect.getDefaults(), {
            decay: 1.5,
            preDelay: 0.01,
        });
    }
    /**
     * The duration of the reverb.
     */
    get decay() {
        return this._decay;
    }
    set decay(time) {
        time = this.toSeconds(time);
        // IR render needs a strictly positive duration, hence the 0.001 floor.
        assertRange(time, 0.001);
        this._decay = time;
        this.generate();
    }
    /**
     * The amount of time before the reverb is fully ramped in.
     */
    get preDelay() {
        return this._preDelay;
    }
    set preDelay(time) {
        time = this.toSeconds(time);
        assertRange(time, 0);
        this._preDelay = time;
        this.generate();
    }
    /**
     * Generate the Impulse Response. Returns a promise while the IR is being generated.
     * @return Promise which returns this object.
     */
    generate() {
        return __awaiter(this, void 0, void 0, function* () {
            const previousReady = this.ready;
            // create a noise burst which decays over the duration in each channel
            const context = new OfflineContext(2, this._decay + this._preDelay, this.context.sampleRate);
            const noiseL = new Noise({ context });
            const noiseR = new Noise({ context });
            const merge = new Merge({ context });
            noiseL.connect(merge, 0, 0);
            noiseR.connect(merge, 0, 1);
            const gainNode = new Gain({ context }).toDestination();
            merge.connect(gainNode);
            noiseL.start(0);
            noiseR.start(0);
            // predelay: silence until _preDelay, then full level
            gainNode.gain.setValueAtTime(0, 0);
            gainNode.gain.setValueAtTime(1, this._preDelay);
            // decay
            gainNode.gain.exponentialApproachValueAtTime(0, this._preDelay, this.decay);
            // render the buffer
            const renderPromise = context.render();
            this.ready = renderPromise.then(noOp);
            // wait for the previous `ready` to resolve so concurrent generate()
            // calls apply their buffers in order
            yield previousReady;
            // set the buffer
            this._convolver.buffer = (yield renderPromise).get();
            return this;
        });
    }
    dispose() {
        super.dispose();
        this._convolver.disconnect();
        return this;
    }
}
//# sourceMappingURL=Reverb.js.map
;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/channel/MidSideSplit.js

/**
 * Mid/Side processing separates the 'mid' signal (which comes out of both the left and the right channel)
 * and the 'side' (which only comes out of the side channels).
 * ```
 * Mid = (Left+Right)/sqrt(2); // obtain mid-signal from left and right
 * Side = (Left-Right)/sqrt(2); // obtain side-signal from left and right
 * ```
 * @category Component
 */
class MidSideSplit_MidSideSplit extends (/* unused pure expression or super */ null && (ToneAudioNode)) {
    constructor() {
        super(optionsFromArguments(MidSideSplit_MidSideSplit.getDefaults(), arguments));
        this.name = "MidSideSplit";
        this._split = this.input = new Split({
            channels: 2,
            context: this.context
        });
        this._midAdd = new Add({ context: this.context });
        // 1/sqrt(2) normalization per the formula above.
        this.mid = new Multiply({
            context: this.context,
            value: Math.SQRT1_2,
        });
        this._sideSubtract = new Subtract({ context: this.context });
        this.side = new Multiply({
            context: this.context,
            value: Math.SQRT1_2,
        });
        // mid = (L + R) * SQRT1_2
        this._split.connect(this._midAdd, 0);
        this._split.connect(this._midAdd.addend, 1);
        // side = (L - R) * SQRT1_2
        this._split.connect(this._sideSubtract, 0);
        this._split.connect(this._sideSubtract.subtrahend, 1);
        this._midAdd.connect(this.mid);
        this._sideSubtract.connect(this.side);
    }
    dispose() {
        super.dispose();
        this.mid.dispose();
        this.side.dispose();
        this._midAdd.dispose();
        this._sideSubtract.dispose();
        this._split.dispose();
        return this;
    }
}
//# sourceMappingURL=MidSideSplit.js.map
;// CONCATENATED MODULE: ./node_modules/tone/build/esm/component/channel/MidSideMerge.js

/**
 * MidSideMerge merges the mid and side signal after they've been separated by [[MidSideSplit]]
 * ```
 * Mid = (Left+Right)/sqrt(2); // obtain mid-signal from left and right
 * Side = (Left-Right)/sqrt(2); // obtain side-signal from left and right
 * ```
 * @category Component
 */
class MidSideMerge_MidSideMerge extends (/* unused pure expression or super */ null && (ToneAudioNode)) {
    constructor() {
        super(optionsFromArguments(MidSideMerge_MidSideMerge.getDefaults(), arguments));
        this.name = "MidSideMerge";
        this.mid = new Gain({ context: this.context });
        this.side = new Gain({ context: this.context });
        this._left = new Add({ context: this.context });
        this._leftMult = new Multiply({
            context: this.context,
            value: Math.SQRT1_2
        });
        this._right = new Subtract({ context: this.context });
        this._rightMult = new Multiply({
            context: this.context,
            value: Math.SQRT1_2
        });
        this._merge = this.output = new Merge({ context: this.context });
        // left = (mid + side) * SQRT1_2
        this.mid.fan(this._left);
        this.side.connect(this._left.addend);
        // right = (mid - side) * SQRT1_2
        this.mid.connect(this._right);
        this.side.connect(this._right.subtrahend);
        this._left.connect(this._leftMult);
        this._right.connect(this._rightMult);
        this._leftMult.connect(this._merge, 0, 0);
        this._rightMult.connect(this._merge, 0, 1);
    }
    dispose() {
        super.dispose();
        this.mid.dispose();
        this.side.dispose();
        this._leftMult.dispose();
        this._rightMult.dispose();
        this._left.dispose();
        this._right.dispose();
        return this;
    }
}
//# sourceMappingURL=MidSideMerge.js.map
;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/MidSideEffect.js

/**
 * Mid/Side processing separates the 'mid' signal
 * (which comes out of both the left and the right channel)
 * and the 'side' (which only comes out of the side channels)
 * and effects them separately before being recombined.
 * Applies a Mid/Side separation and recombination.
 * Algorithm found in [kvraudio forums](http://www.kvraudio.com/forum/viewtopic.php?t=212587).
 * This is a base-class for Mid/Side Effects.
 * @category Effect
 */
class MidSideEffect_MidSideEffect extends (/* unused pure expression or super */ null && (Effect)) {
    constructor(options) {
        super(options);
        this.name = "MidSideEffect";
        // NOTE(review): references the un-renamed `MidSideMerge`/`MidSideSplit`
        // identifiers rather than the module-prefixed class names above — left
        // unresolved by the bundler, consistent with this class being dead code.
        this._midSideMerge = new MidSideMerge({ context: this.context });
        this._midSideSplit = new MidSideSplit({ context: this.context });
        this._midSend = this._midSideSplit.mid;
        this._sideSend = this._midSideSplit.side;
        this._midReturn = this._midSideMerge.mid;
        this._sideReturn = this._midSideMerge.side;
        // the connections
        this.effectSend.connect(this._midSideSplit);
        this._midSideMerge.connect(this.effectReturn);
    }
    /**
     * Connect the mid chain of the effect
     */
    connectEffectMid(...nodes) {
        this._midSend.chain(...nodes, this._midReturn);
    }
    /**
     * Connect the side chain of the effect
     */
    connectEffectSide(...nodes) {
        this._sideSend.chain(...nodes, this._sideReturn);
    }
    dispose() {
        super.dispose();
        this._midSideSplit.dispose();
        this._midSideMerge.dispose();
        this._midSend.dispose();
        this._sideSend.dispose();
        this._midReturn.dispose();
        this._sideReturn.dispose();
        return this;
    }
}
//# sourceMappingURL=MidSideEffect.js.map
;// CONCATENATED MODULE: ./node_modules/tone/build/esm/effect/StereoWidener.js

/**
 * Applies a width factor to the mid/side separation.
 * 0 is all mid and 1 is all side.
 * Algorithm found in [kvraudio forums](http://www.kvraudio.com/forum/viewtopic.php?t=212587).
 * ```
 * Mid *= 2*(1-width)