Recording audio as .wav in Chrome with JavaScript

Question · Votes: 0 · Answers: 1

I am building a web page that records audio from the user's device and sends it to Microsoft's Cognitive Speech Services for speech-to-text conversion. So far I have been able to create and play back an .ogg file created in JavaScript, but I need to get the file in .wav format.

The Blob type audio/wav cannot be relied on, since not all browsers support it (at least mine doesn't). The Blobs are sent to and stored by a Django server. When I try to open those files with PySoundFile, I get the error File contains data in an unknown format. The Blobs are created with new Blob(chunks, { type: 'audio/ogg; codecs=opus' }) and saved via a django.db.FileField; the Blob chunks come from MediaRecorder.ondataavailable.
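For reference, this is roughly the flow I started with (a stripped-down sketch, not my actual page code; the /api/audio/ endpoint and the 'audio' field name are placeholders for my Django view):

navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
    const chunks = [];
    const recorder = new MediaRecorder(stream);

    recorder.ondataavailable = (e) => chunks.push(e.data);

    recorder.onstop = () => {
        // Chrome actually records Opus in a WebM container and Firefox in Ogg;
        // neither produces WAV, so a blob labeled like this is not what
        // PySoundFile expects, hence the "unknown format" error.
        const blob = new Blob(chunks, { type: 'audio/ogg; codecs=opus' });
        const form = new FormData();
        form.append('audio', blob, 'recording.ogg');
        fetch('/api/audio/', { method: 'POST', body: form });
    };

    recorder.start();
    setTimeout(() => recorder.stop(), 5000); // record for five seconds
});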

Update: I ditched MediaRecorder and instead opted for a ScriptProcessorNode. Again, it works in Firefox but not in Chrome. Chrome seems to take a small portion at the end of the audio and repeat that portion for the length of the recording. Here is the code I am using, which is based on Matt Diamond's work at github.com/mattdiamond/Recorderjs. A demo using his work can be seen at webaudiodemos.appspot.com/AudioRecorder/index.html, and that demo works for me in both Chrome and Firefox. Also, my original code is in a class, and I did not want to include the whole class, so I apologize for any syntax errors introduced while extracting it.


Update: I have confirmed that when the values from Chrome's buffers are given as input to interleave on Firefox, its output is identical to Chrome's. This suggests that Chrome is not filling recBuffers with the correct values. Indeed, when I inspect recBuffers on Chrome, each channel is filled with alternating lists; an illustrative example follows the code listing below.

let recBuffers = [[], []];
let recLength = 0;
let numChannels = 2;
let listening = false;
let timeout = null;
let maxTime = 5000; // max recording length in ms; assumed value here, defined elsewhere in my original class
let audioContext = null; // kept at this scope so encodeWAV can read the sample rate
let constraints = {
    audio: true
};
let failedToGetUserMedia = false;

if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    // prefer the modern promise-based API
    navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
        init(stream);
    }).catch((err) => {
        alert('Unable to access audio.\n\n' + err);
        console.log('The following error occurred: ' + err);
        failedToGetUserMedia = true;
    });
}
else if (navigator.getUserMedia) {
    // fall back to the legacy callback-based API
    navigator.getUserMedia(constraints, (stream) => {
        init(stream);
    }, (err) => {
        alert('Unable to access audio.\n\n' + err);
        console.log('The following error occurred: ' + err);
        failedToGetUserMedia = true;
    });
}
else failedToGetUserMedia = true;

function beginRecording() {
    recBuffers = [[], []];
    recLength = 0;
    listening = true;
    timeout = setTimeout(() => {
        endRecording();
    }, maxTime);
}

function endRecording() {
    clearTimeout(timeout);
    timeout = null;
    exportWAV();
}

function init(stream) {
    audioContext = new AudioContext();
    let source = audioContext.createMediaStreamSource(stream);
    let context = source.context;
    // createScriptProcessor is the current name of the deprecated createJavaScriptNode
    let node = (context.createScriptProcessor || context.createJavaScriptNode).call(context, 4096, numChannels, numChannels);
    node.onaudioprocess = (e) => {
        if (!listening) return;

        for (var i = 0; i < numChannels; i++) {
            recBuffers[i].push(e.inputBuffer.getChannelData(i));
        }

        // every chunk delivered by the node has the same length (4096 samples)
        recLength += recBuffers[0][0].length;
    }
    source.connect(node);
    node.connect(context.destination);
}

function mergeBuffers(buffers, len) {
    let result = new Float32Array(len);
    let offset = 0;
    for (var i = 0; i < buffers.length; i++) {
        result.set(buffers[i], offset);
        offset += buffers[i].length;
    }
    return result;
}

function interleave(inputL, inputR) {
    let len = inputL.length + inputR.length;
    let result = new Float32Array(len);

    let index = 0;
    let inputIndex = 0;

    while (index < len) {
        result[index++] = inputL[inputIndex];
        result[index++] = inputR[inputIndex];
        inputIndex++;
    }

    return result;
}

function exportWAV() {
    let buffers = [];
    for (var i = 0; i < numChannels; i++) {
        buffers.push(mergeBuffers(recBuffers[i], recLength));
    }

    let interleaved = numChannels === 2 ? interleave(buffers[0], buffers[1]) : buffers[0];
    let dataView = encodeWAV(interleaved);
    let blob = new Blob([ dataView ], { type: 'audio/wav' });
    blob.name = Math.floor((new Date()).getTime() / 1000) + '.wav';

    listening = false;

    return blob;
}

function floatTo16BitPCM(output, offset, input){
    for (var i = 0; i < input.length; i++, offset+=2){
        var s = Math.max(-1, Math.min(1, input[i]));
        output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
}

function writeString(view, offset, string){
    for (var i = 0; i < string.length; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

function encodeWAV(samples){
    var buffer = new ArrayBuffer(44 + samples.length * 2);
    var view = new DataView(buffer);

    /* RIFF identifier */
    writeString(view, 0, 'RIFF');
    /* file length */
    view.setUint32(4, 36 + samples.length * 2, true);
    /* RIFF type */
    writeString(view, 8, 'WAVE');
    /* format chunk identifier */
    writeString(view, 12, 'fmt ');
    /* format chunk length */
    view.setUint32(16, 16, true);
    /* sample format (raw) */
    view.setUint16(20, 1, true);
    /* channel count */
    view.setUint16(22, numChannels, true);
    /* sample rate */
    view.setUint32(24, audioContext.sampleRate, true);
    /* byte rate (sample rate * block align) */
    view.setUint32(28, audioContext.sampleRate * numChannels * 2, true);
    /* block align (channel count * bytes per sample) */
    view.setUint16(32, numChannels * 2, true);
    /* bits per sample */
    view.setUint16(34, 16, true);
    /* data chunk identifier */
    writeString(view, 36, 'data');
    /* data chunk length */
    view.setUint32(40, samples.length * 2, true);

    floatTo16BitPCM(view, 44, samples);

    return view;
}

if (!failedToGetUserMedia) beginRecording(); // getUserMedia resolves asynchronously, so in practice this should run only after init(stream) has been called

Here is an example of what recBuffers ends up holding on Chrome:

recBuffers = [
    [
        [2, 3], [7, 1], [2, 3], [7, 1], [2, 3], [7, 1], [2, 3], [7, 1], [2, 3], [7, 1]
    ],
    [
        [5, 4], [6, 8], [5, 4], [6, 8], [5, 4], [6, 8], [5, 4], [6, 8], [5, 4], [6, 8]
    ]
]

Of course, the actual values are different; this is just an example to illustrate the point.
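As a quick local sanity check (illustrative only, not part of my original page), the blob returned by exportWAV can be played back or downloaded before anything is sent to the server:

let blob = exportWAV();
let url = URL.createObjectURL(blob);

// either play it back in the page...
new Audio(url).play();

// ...or offer it as a download and inspect the header by hand
let a = document.createElement('a');
a.href = url;
a.download = blob.name;
a.click();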

javascript google-chrome audio wav
1 Answer

1 vote

Initially, I was using MediaRecorder to get the audio and create Blobs of type audio/wav from it. That does not work in Chrome, but it does in Firefox. I gave up on that and started using a ScriptProcessorNode. Again, it worked in Firefox but not in Chrome. After some debugging, it became apparent that on Chrome, recBuffers was being filled with those alternating lists. I am still not sure why this happens, but my guess is that it is something like scoping or caching, since spread syntax solves it. Changing the line in onaudioprocess from recBuffers[i].push(e.inputBuffer.getChannelData(i)); to recBuffers[i].push([...e.inputBuffer.getChannelData(i)]); works.
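For what it is worth, the behaviour is consistent with Chrome re-using the AudioBuffer (and therefore the Float32Array returned by getChannelData) across audioprocess events: every pushed entry ends up aliasing the same memory, which would also explain the "repeated tail" symptom above. Any copy of the samples breaks the aliasing; the corrected handler below is a sketch, and data.slice() or new Float32Array(data) should work just as well as spread syntax:

node.onaudioprocess = (e) => {
    if (!listening) return;

    for (let i = 0; i < numChannels; i++) {
        let data = e.inputBuffer.getChannelData(i);
        // copy the samples instead of storing a reference to the re-used
        // buffer; [...data] yields a plain array of numbers, which the
        // Float32Array.prototype.set call in mergeBuffers accepts
        recBuffers[i].push([...data]);
    }

    recLength += e.inputBuffer.getChannelData(0).length;
};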
