// Short-time Fourier transform analysis/resynthesis helpers built on fft.js.
// toSpectrum() slices a mono PCM buffer into Hamming-windowed, overlapping
// frames and returns their complex spectra; fromSpectrum() resynthesizes a
// Web Audio AudioBuffer from those spectra by inverse FFT and overlap-add.
import Tone from 'tone'
import output from './output'

const signalWindows = require('signal-windows').windows
const FFTJS = require('fft.js')

const fft_size = 2 << 10             // 2048-sample frames
const fft_overlap = fft_size / 4     // hop size: 512 samples (75% overlap)
const half_fft_size = fft_size / 2
const fft = new FFTJS(fft_size)

// Analyze mono PCM (a Float32Array or plain array) into overlapping complex
// spectra. Returns { data, sr, fft_size, fft_overlap }, where data is an
// array of interleaved [re, im] frames as produced by fft.js.
function toSpectrum(pcm, sr){
  sr = sr || 44100
  const ham = signalWindows.construct('ham', fft_size)
  const pcm_in = new Array(fft_size)
  const pcm_length = pcm.length
  // round the analysis range up to a whole number of frames
  const pcm_q_length = Math.ceil(pcm_length / fft_size) * fft_size
  let i, j, fft_out
  const data = []
  for (i = -fft_size; i < pcm_q_length; i += fft_overlap) {
    for (j = 0; j < fft_size; j++) {
      // out-of-range reads produce NaN, which `|| 0` coerces to silence
      pcm_in[j] = pcm[i + j] * ham[j] || 0
    }
    fft_out = fft.createComplexArray()
    fft.realTransform(fft_out, pcm_in)
    fft.completeSpectrum(fft_out)
    data.push(fft_out)
  }
  return {
    data,
    sr,
    fft_size,
    fft_overlap,
  }
}

// Resynthesize an AudioBuffer from a spectrum object produced by toSpectrum().
// Each frame is inverse-transformed, windowed again, and overlap-added into
// the output at hop-size intervals.
function fromSpectrum(spec){
  const data = spec.data
  const sr = spec.sr
  const fft_size = spec.fft_size
  const fft_overlap = spec.fft_overlap
  const spec_len = data.length
  const ham = signalWindows.construct('ham', fft_size)
  const out = fft.createComplexArray()
  const pcm_length = fft_overlap * spec_len
  const audioBuffer = Tone.context.createBuffer(1, pcm_length, sr)
  const pcm = audioBuffer.getChannelData(0)
  let i, j, u, col
  for (i = 0; i < spec_len; i++) {
    col = data[i]
    fft.inverseTransform(out, col)
    u = i * fft_overlap
    // Alternative: keep only the unwindowed middle half of each frame.
    // for (j = fft_size * 1/4; j < fft_size * 3/4; j++) {
    //   pcm[u+j] = out[j*2] / ham[j] || 0
    // }
    for (j = 0; j < fft_size; j++) {
      // out is interleaved [re, im]; take the real part, apply the synthesis
      // window, and overlap-add. Writes past the buffer end are ignored.
      pcm[u + j] += out[j * 2] * ham[j] || 0
    }
  }
  fadeInOut(pcm, fft_size)
  return audioBuffer
}

// Apply a quadratic fade-in over the first fade_size samples and a matching
// fade-out over the last fade_size samples, in place.
function fadeInOut(pcm, fade_size){
  const pcm_length = pcm.length
  let fade = 0
  let i
  for (i = 0; i < fade_size; i++) {
    fade = i / fade_size
    fade *= fade
    pcm[i] *= fade
    pcm[pcm_length - 1 - i] *= fade
  }
}

// Unimplemented stub; not used or exported yet.
function fadeOut(pcm){
}

export default {
  toSpectrum,
  fromSpectrum
}
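
/*
 * Usage sketch (illustrative only, not part of this module). It assumes a
 * recent Tone.js where Tone.Player accepts a raw AudioBuffer and exposes
 * toDestination(); `pcm` stands in for a mono Float32Array the host app has
 * already decoded, and './spectrum' is a placeholder import path.
 *
 *   import spectrum from './spectrum'
 *
 *   const spec = spectrum.toSpectrum(pcm, 44100)
 *   // spec.data holds one interleaved [re, im] complex array per frame and
 *   // can be edited here before resynthesis.
 *   const audioBuffer = spectrum.fromSpectrum(spec)
 *   new Tone.Player(audioBuffer).toDestination().start()
 */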