HTML audio recording until silence?

Question (11 votes, 3 answers)

I am looking for a browser-based way of recording audio until silence occurs.

HTML audio recording from the microphone is possible in Firefox and Chrome using Recordmp3js; see: http://nusofthq.com/blog/recording-mp3-using-only-html5-and-javascript-recordmp3-js/ and the code on GitHub: http://github.com/nusofthq/Recordmp3js

I cannot see a way to change that code so it records until silence.

Recording until silence can be done (and tuned) in Java for a native Android app - see: Android audio capture silence detection.

Google Voice Search demonstrates that a browser can do it - but how can I, using Javascript? Any ideas?

javascript html5 audio capture microphone
3 Answers
11 votes

If you use the Web Audio API, open a live microphone capture by calling navigator.getUserMedia, then create a node using createScriptProcessor, then assign that node a callback for its onaudioprocess event. Inside your callback function (below I use script_processor_analysis_node) you have access to the live, real-time audio buffer, which you can then parse looking for silence (some length of time where the amplitude stays low [remains close to zero]).

For the normal time-domain audio curve, see array_time_domain, which is freshly populated on each invocation of the script_processor_analysis_node callback ... similarly, for the frequency domain, see array_freq_domain.

```html
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var BUFF_SIZE_RENDERER = 16384;

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_analysis_node = null,
        analyser_node = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                                 navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia){

        navigator.getUserMedia({audio:true},
            function(stream) {
                start_microphone(stream);
            },
            function(e) {
                alert('Error capturing audio.');
            }
        );

    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;

        console.log("__________ " + label);

        if (label === "time") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                var curr_value_time = (given_typed_array[index] / 128) - 1.0;
                console.log(curr_value_time);
            }

        } else if (label === "frequency") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                console.log(given_typed_array[index]);
            }

        } else {
            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {

        var i, N, inp, microphone_output_buffer;

        microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
    }

    function start_microphone(stream){

        gain_node = audioContext.createGain();
        gain_node.connect( audioContext.destination );

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node);

        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;

        microphone_stream.connect(script_processor_node);

        // --- enable volume control for output speakers

        document.getElementById('volume').addEventListener('change', function() {
            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;
            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT

        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);

        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;

        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function() {

            // get the average for the first channel
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            // draw the spectrogram
            if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {

                show_some_data(array_freq_domain, 5, "frequency");
                show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file

                // examine array_time_domain for near zero values over some time period
            }
        };
    }

}(); //  webaudio_tooling_obj = function()

</script>

</head>
<body>

    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>

</body>
</html>
```

Turn down your speaker volume or use headphones to avoid feedback: microphone -> speakers -> microphone ...
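The demo above only logs buffer values at the point where its comment says to examine array_time_domain for near-zero readings. As a minimal sketch of that silence test (the 0.02 amplitude threshold and the 1500 ms hold time below are arbitrary assumptions, not values from the answer), something like this could be driven from the onaudioprocess callback:

```javascript
// Minimal sketch: flag "silence" once the mic stays quiet long enough.
// Assumed values (tune for your setup): amplitude threshold 0.02,
// required quiet duration 1500 ms.
var SILENCE_THRESHOLD = 0.02;
var SILENCE_DURATION_MS = 1500;
var silenceStartedAt = null;

function looksSilent(timeDomainBytes) {
    // getByteTimeDomainData() centers silence at 128; measure peak deviation
    var peak = 0;
    for (var i = 0; i < timeDomainBytes.length; i++) {
        var amplitude = Math.abs((timeDomainBytes[i] / 128) - 1.0);
        if (amplitude > peak) peak = amplitude;
    }
    return peak < SILENCE_THRESHOLD;
}

// call this from the onaudioprocess callback, passing array_time_domain
function checkForSilence(timeDomainBytes) {
    if (looksSilent(timeDomainBytes)) {
        if (silenceStartedAt === null) silenceStartedAt = Date.now();
        if (Date.now() - silenceStartedAt >= SILENCE_DURATION_MS) {
            console.log("silence detected - stop recording here");
            silenceStartedAt = null; // or tear down the graph / finalize the file
        }
    } else {
        silenceStartedAt = null; // heard something, restart the quiet timer
    }
}
```

Lowering the threshold makes the detector less eager to cut off quiet speech; lengthening the hold time keeps it from splitting a take on short pauses between words.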

3 votes

This is an old post, but I am sure many people will have the same problem, so I am posting my solution here: use hark.js.

Below is the sample demo code I used for my Electron app:

```javascript
hark = require('./node_modules/hark/hark.bundle.js')

navigator.getUserMedia({ audio : true}, onMediaSuccess, function(){});

function onMediaSuccess(blog) { // blog is the captured MediaStream
    var options = {};
    var speechEvents = hark(blog, options);
    speechEvents.on('speaking', function() {
        console.log('speaking');
    });
    speechEvents.on('stopped_speaking', function() {
        console.log('stopped_speaking');
    });
};
```

0 votes

The solution from @Scott Stensland does not let me parse for silence. I get the same values every time I parse the two arrays: parsing arrayFreqDomain always gives 0, and parsing arrayTimeDomain always gives 128.
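Those two constants are exactly what the Web Audio API reports for dead silence: getByteFrequencyData returns 0 for empty bins, and getByteTimeDomainData centers the waveform on 128, so unchanging 0/128 readings suggest the microphone stream never reached the analyser node (for example, getUserMedia was blocked, or the graph was wired up before the stream arrived). As a rough sanity check, assuming the analyser_node from the first answer is in scope (the one-second interval is an arbitrary choice):

```javascript
// Log the peak deviation from the 128 zero line once per second.
// If it stays at 0 while you are speaking, the microphone stream
// is not feeding the analyser node at all.
setInterval(function () {
    var bytes = new Uint8Array(analyser_node.frequencyBinCount);
    analyser_node.getByteTimeDomainData(bytes);
    var peak = 0;
    for (var i = 0; i < bytes.length; i++) {
        peak = Math.max(peak, Math.abs(bytes[i] - 128));
    }
    console.log('peak deviation from the silence line:', peak); // 0 means no signal
}, 1000);
```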