import Tone from 'tone'
import Sampler from './lib/sampler'
import draw from './draw'
import keys from './lib/keys'
import color from './lib/color'
import mouse from './lib/mouse'
import output from './lib/output'
import spectrum from './lib/spectrum'
import { Hall } from './lib/hall'
import {
  browser,
  requestAudioContext,
  randint,
  randrange,
  choice,
  clamp,
  lerp,
  dist,
  shuffle,
  isMobile,
} from './lib/util'

const root = 440
const s = 50
const w = window.innerWidth
const h = window.innerHeight
const ws = w/s, hs = h/s

const HALLWAY_LENGTH = 147
const SPEAKER_COUNT = 16

// A pool of frequencies (Hz) spread over several octaves, then shuffled.
let notes = [299, 336, 374, 399, 449, 498, 561, 598].map(i => i/2)
notes = notes.concat(notes.map(i => i/2))
notes = notes.concat(notes.map(i => i*2))
notes = shuffle(notes)

let samplers = {}
let sampler

requestAudioContext(() => {
  // samplers.smash = new Sampler('samples/smash/g{}.mp3', 12)
  samplers.earth = new Sampler('samples/earth/earth{}.wav', 20)
  // samplers.glass = new Sampler('samples/glass/0{}Particle.mp3', 20)
  // samplers.kalimba = new Sampler('samples/kalimba/380731__cabled-mess__sansula-08-c-raw.wav', 10)
  sampler = samplers.earth

  // Weighted random pick between the smash / kalimba / glass samplers
  // (only meaningful while the commented-out samplers above are loaded).
  samplers.choice = (m, n) => {
    const r = Math.random()
    if (r < m) return samplers.smash
    if (r < m + n) return samplers.kalimba
    return samplers.glass
  }

  Tone.Buffer.on('load', function(){
    console.log('all buffers are loaded.')
    // redraw()
  })
})

const hall = new Hall({
  length: HALLWAY_LENGTH,
  speakers: SPEAKER_COUNT,
})

function redraw(){
  draw.clear()
}

// Scramble a spectrum in place: within every band of band_size bins,
// read the bins back to front, then zero the mirrored half of each frame.
function manipulate(spec){
  const data = spec.data
  const sr = spec.sr
  const fft_size = spec.fft_size
  const fft_overlap = spec.fft_overlap
  const spec_len = data.length

  let i, j, u, v, _r, _i
  let aa = []
  for (i = 0; i < fft_size; i++) {
    aa[i] = i
  }
  shuffle(aa)

  let new_data = [], new_col, col
  let bands = 2 << 4
  let band, band_index
  let band_size = Math.floor(fft_size / bands)

  for (i = 0; i < spec_len; i++) {
    col = data[i]
    new_col = new_data[i] = data[i].concat()
    data[i][2] = 0
    for (j = 0; j < fft_size; j++) {
      band = Math.floor(j / band_size) * band_size
      band_index = j % band_size
      // reverse the bins inside this band
      new_col[j] = col[ band + (band_size - band_index) ]

      // spectrum inversion
      // new_col[j] = data[i][ fft_size - j - 1]

      // erase mirrored half of fft
      new_col[j + fft_size] = 0
    }
    new_col[2] = 0
  }
  spec.data = new_data //.reverse()

  //   col = data[i]
  //   for (j = 0; j < fft_size; j++) {
  //     _r = j*2
  //     _i = j*2+1
  //     col[_r] = col[_r] / 2
  //     col[_i] = col[_i]
  //   }
  // }
}
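// Worked example of the band mapping in the inner loop above (illustrative
// numbers only; the real fft_size comes from spectrum.toSpectrum): with
// fft_size = 1024 and bands = 32, band_size is 32, so destination bin 5 is
// filled from source bin 0 + (32 - 5) = 27, bin 6 from 26, and so on: every
// 32-bin band is read back to front. When j is the first bin of a band, the
// source index lands on the first bin of the next band. A standalone sketch
// of the same mapping:
//
//   const bandReverse = (j, band_size) => {
//     const band_start = Math.floor(j / band_size) * band_size
//     return band_start + (band_size - (j % band_size))
//   }
//   bandReverse(5, 32)   // => 27
//   bandReverse(32, 32)  // => 64 (first bin of the next band)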
// On every key press: play the dry sample, analyse it, scramble its spectrum,
// resynthesize it, and schedule the scrambled copy right after the dry one.
keys.listen(index => {
  // trigger(Math.random(), ((index+7) % SPEAKER_COUNT) / SPEAKER_COUNT, 0, samplers.smash)
  const sample = sampler.play(100, Tone.now(), output)
  const buf = sample._buffer.get()
  if (!buf) return

  const pcm = buf.getChannelData(0)
  const sr = buf.sampleRate
  const duration = buf.duration

  const spec = spectrum.toSpectrum(pcm, sr)

  draw.clear()
  draw.waveform(pcm)
  draw.spectrum(spec, 0, window.innerHeight/4 + 20)

  manipulate(spec)

  const audioBuffer = spectrum.fromSpectrum(spec)
  const player = new Tone.Player(audioBuffer)
  player.connect(output)
  // start the processed copy as soon as the dry sample finishes
  player.start(Tone.now() + pcm.length / sr)

  // const new_spec = spectrum.toSpectrum(audioBuffer.getChannelData(0), sr)
  draw.spectrum(spec, 0, window.innerHeight * 1/2 + 40)
})

mouse.register({
  down: (x, y) => {
    redraw()
  },
  move: (x, y, dx, dy) => {
  },
  up: (x, y) => {
  },
})

let timeout, px = 0, py = 0
let last_dist = 0 // used by trigger() to weight the random sampler choice

function play(x, y){
}

function trigger(x, y, t, sampler){
  t = t || 0
  t += Tone.now()
  // use the sampler that was passed in, otherwise pick one at random,
  // weighted by the last pointer distance and by y
  sampler = sampler || (last_dist > 40 ?
    samplers.choice(0.2, 0.2) :
    samplers.choice((1-y) * 0.2, y*0.02))
  const freq = notes[Math.floor(x * notes.length)]
  const speaker = hall.play(sampler, y, freq, x, t)
  draw.triangle(
    lerp(x, 0, 1) * window.innerWidth,
    lerp(y, 0, 1) * window.innerHeight - 20,
    40
  )
}
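// Example calls for trigger() (a sketch only; nothing in this file calls it
// yet besides the commented-out line in the key handler). x chooses the note
// from the shuffled scale and is also passed to hall.play, y is passed to
// hall.play as well (presumably the position along the hallway), t is an
// optional delay in seconds, and the sampler argument is optional
// (samplers.earth is the only sampler loaded above):
//
//   trigger(Math.random(), 0.5, 0, samplers.earth)   // random note, mid hall, right now
//   trigger(0.25, 0.9, 0.1, samplers.earth)          // fixed note, further along, 0.1 s later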