Using a separate AudioContext / ScriptProcessorNode with Wavesurfer

Problem description

I'm trying to use an AudioContext and ScriptProcessorNode separate from Wavesurfer's defaults, so that I can manipulate the pitch of the audio independently of the playback rate. When I pass my own context and script processor as parameters and play the audio back, I get no sound.

My Waveform component:

const playbackEngine = new PlaybackEngine({
    emitter: emitter,
    pitch: pitch,
});

const Waveform = WaveSurfer.create({
    audioContext: playbackEngine.context,
    audioScriptProcessor: playbackEngine.scriptProcessor,
    barWidth: 1,
    cursorWidth: 1,
    pixelRatio: 1,
    container: '#audio-spectrum',
    progressColor: '#03a9f4',
    height: 100,
    normalize: true,
    responsive: true,
    waveColor: '#ccc',
    cursorColor: '#4a74a5'
});

// called in ComponentDidMount()

function loadMediaUrl(url) {
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';

    // Decode asynchronously
    request.onload = async function() {
        let buffer = request.response;

        // PlaybackEngine.decodeAudioData returns a promise and takes no
        // callback, so decode errors are handled with try/catch here
        let audioBuff;
        try {
            audioBuff = await playbackEngine.decodeAudioData(buffer);
        } catch (error) {
            console.error('Error decoding audio:', error);
            return;
        }

        // sets audioBuffer for Wavesurfer to render the waveform
        // (where I believe the problem begins)
        Waveform.loadDecodedBuffer(audioBuff);

        // sets audioBuffer for Playback Engine to playback audio
        playbackEngine.setBuffer(audioBuff);
    };
    request.send();
}
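
For reference, the same load step can be written with fetch instead of XMLHttpRequest. This is a minimal sketch under the assumptions above (the playbackEngine and Waveform instances already exist in scope); loadMediaUrlFetch is a hypothetical name chosen to avoid clashing with the original:

// Hypothetical fetch-based equivalent of loadMediaUrl above
async function loadMediaUrlFetch(url) {
    const response = await fetch(url);
    const buffer = await response.arrayBuffer();
    const audioBuff = await playbackEngine.decodeAudioData(buffer);

    // same dual hand-off: one reference for Wavesurfer's rendering,
    // one for the playback engine
    Waveform.loadDecodedBuffer(audioBuff);
    playbackEngine.setBuffer(audioBuff);
}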

Playback.js


const {SimpleFilter, SoundTouch} = require('./soundtouch');

const BUFFER_SIZE = 4096;

class PlaybackEngine {
    constructor({emitter, pitch}) {
        this.emitter = emitter;
        this.context = new (window.AudioContext || window.webkitAudioContext)();
        this.scriptProcessor = this.context.createScriptProcessor(BUFFER_SIZE, 2, 2);

        // Pull pitch-shifted samples from SoundTouch and de-interleave
        // them into the left/right output channels on each audio block.
        this.scriptProcessor.onaudioprocess = e => {
            const l = e.outputBuffer.getChannelData(0);
            const r = e.outputBuffer.getChannelData(1);
            const framesExtracted = this.simpleFilter.extract(this.samples, BUFFER_SIZE);
            if (framesExtracted === 0) {
                // the source is exhausted: playback has finished
                this.emitter.emit('stop');
            }
            for (let i = 0; i < framesExtracted; i++) {
                l[i] = this.samples[i * 2];
                r[i] = this.samples[i * 2 + 1];
            }
        };

        this.soundTouch = new SoundTouch();
        this.soundTouch.pitch = pitch;

        this.duration = undefined;
    }

    get pitch() {
        return this.soundTouch.pitch;
    }
    set pitch(pitch) {
        this.soundTouch.pitch = pitch;
    }

    decodeAudioData(data) {
        return this.context.decodeAudioData(data);
    }

    setBuffer(buffer) {
        const bufferSource = this.context.createBufferSource();
        bufferSource.buffer = buffer;
        this.samples = new Float32Array(BUFFER_SIZE * 2);
        this.source = {
            // SimpleFilter pulls interleaved stereo frames from here
            extract: (target, numFrames, position) => {
                this.emitter.emit('time', (position / this.context.sampleRate));
                const l = buffer.getChannelData(0);
                const r = buffer.getChannelData(1);
                // clamp so we never read past the end of the buffer
                const frames = Math.min(numFrames, l.length - position);
                for (let i = 0; i < frames; i++) {
                    target[i * 2] = l[i + position];
                    target[i * 2 + 1] = r[i + position];
                }
                return frames;
            },
        };
        this.simpleFilter = new SimpleFilter(this.source, this.soundTouch);

        this.duration = buffer.duration;
        this.emitter.emit('duration', buffer.duration);
    }

    play() {
        // audio flows as soon as the processor is wired to the output
        this.scriptProcessor.connect(this.context.destination);
    }

    pause() {
        this.scriptProcessor.disconnect(this.context.destination);
    }

    seekPercent(percent) {
        if (this.simpleFilter !== undefined) {
            this.simpleFilter.sourcePosition = Math.round(
                percent / 100 * this.duration * this.context.sampleRate
            );
        }
    }
}

export default PlaybackEngine;
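
Standalone, the engine can be driven as below. A minimal usage sketch, assuming the emitter is any Node-style EventEmitter (here the events package) and that a decoded buffer has already been handed to setBuffer() via loadMediaUrl above:

import { EventEmitter } from 'events';
import PlaybackEngine from './Playback';

const emitter = new EventEmitter();
const engine = new PlaybackEngine({ emitter, pitch: 1.0 });

emitter.on('time', (seconds) => { /* update a progress UI */ });
emitter.on('stop', () => console.log('playback finished'));

// after decodeAudioData()/setBuffer() have run:
engine.pitch = 1.5;      // raise the pitch; tempo is unaffected
engine.play();           // connects the script processor to the destination
engine.seekPercent(50);  // jump to the middle of the track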


With this setup, Waveform.play() gives me playback from the wavesurfer instance but no way to manipulate the pitch. Conversely, playbackEngine.play() lets me manipulate the pitch, but I lose all of Wavesurfer's functionality.

While I'm fairly certain the problem is that Wavesurfer and my playback engine use two separate AudioBuffers, I need to set the buffer in my playback context and also render the waveform with Wavesurfer.

I'd like to know if anyone can confirm how to control a Wavesurfer instance with the playback engine's context, script processor, and AudioBuffer (i.e. have Waveform.play() play the audio from the playback engine while still updating Wavesurfer's UI).

Any help is appreciated.

reactjs web-audio-api audiocontext wavesurfer.js scriptprocessor
1 Answer

So I ended up manually removing

audioScriptProcessor: playbackEngine.scriptProcessor,

from the Wavesurfer initialization, and then manually attached the playbackEngine's script processor to the destination node. I had tried a setup like this before and heard annoying popping during playback. I thought it was a nasty sample-buffer bug, but it was actually coming from the EventEmitter instance I was using to constantly broadcast the current time between files. Once I removed that, the noise problem went away. (ツ)
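
In code, the working setup looks roughly like this; a sketch reconstructed from the description above, not the answerer's verbatim code:

// Wavesurfer still shares the engine's AudioContext, but no longer
// owns the script processor.
const Waveform = WaveSurfer.create({
    audioContext: playbackEngine.context,
    container: '#audio-spectrum',
    // ...visual options unchanged from the question
});

// Route the engine's script processor to the speakers manually
// (the same connect call playbackEngine.play() makes internally).
playbackEngine.scriptProcessor.connect(playbackEngine.context.destination);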
