javascript, audio-recording

How to lower mic input volume using navigator.mediaDevices.getUserMedia?


I'm creating an audio recording app using navigator.mediaDevices.getUserMedia(), and it records every sound around me, even very quiet ones that are 10 meters away. I DO NOT play this sound back, I only visualize it based on its volume, so I only need reasonably loud sounds, or sounds that are close to the mic, because there's too much interference.

Also, if I enable playback to hear my mic input and make a quiet noise like tapping on the table, I can't hear that sound in the playback, but I do see it in the visualizer, and this is exactly what I don't want.

Here's my code:

const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
this.audioContext = new AudioContext();
this.sourceNode = this.audioContext.createMediaStreamSource(stream);
this.analyserNode = this.audioContext.createAnalyser();

this.sourceNode.connect(this.analyserNode);

const data = new Float32Array(this.analyserNode.fftSize);
this.analyserNode.getFloatTimeDomainData(data);

So how can I lower the mic sensitivity using the Web Audio API, or lower the mic input volume, or maybe transform the data from the analyser? I've read about AudioContext.createGain(), gain.volume, but it's used for the output audio volume, not the input one.


Solution

  • I've read about AudioContext.createGain(), gain.volume, but it's used for the output audio volume, not the input one

    No, it is used to control the volume of the audio that goes through it.

    You have to think of your audio context nodes as a chain; then you'll see that you can indeed use a GainNode to control the input volume of the next node it is connected to.

    For example, if we declare something like

    gainNode.gain.value = 0.5;
    input.connect(gainNode);
    gainNode.connect(analyserNode);
    input.connect(audioContext.destination);
    

    it can be seen as

    Input [mic] ===>  GainNode  ===>  AnalyserNode
        100%   ||       50%                50%
               ||
                ===> AudioContext Output
                           100%
    

    So the gainNode here does lower the volume that reaches your AnalyserNode, but not the volume of the context output.
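
    Applied to your getUserMedia code, that chain would look something like this (a minimal sketch reusing your variable names; the 0.5 value is only an example):

    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    this.audioContext = new AudioContext();
    this.sourceNode = this.audioContext.createMediaStreamSource(stream);
    this.analyserNode = this.audioContext.createAnalyser();
    
    // attenuate the mic signal before it reaches the analyser
    this.gainNode = this.audioContext.createGain();
    this.gainNode.gain.value = 0.5; // example value, tune to taste
    
    this.sourceNode.connect(this.gainNode);
    this.gainNode.connect(this.analyserNode);

    Keep in mind that a gain scales loud and quiet sounds alike, so by itself it only makes everything smaller rather than hiding the quiet background noise.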


    But this is not really what you want.

    Indeed, the AnalyserNode API has minDecibels and maxDecibels properties which will do exactly what you want (filter out sounds that fall outside that dB range).

    But these properties only make sense for frequency data (getXXXFrequencyData), since the waveform doesn't take volume into account: with getByteFrequencyData, anything below minDecibels comes back as 0, and anything above maxDecibels is clamped to 255.

    However, it is still possible to check whether this frequency data is within our required bounds before deciding whether to draw the waveform.

    polyfill();
    
    (async() => {
    
      const ctx = new AudioContext();
      const input = await loadFileAsBufferNode(ctx);
      const analyser = ctx.createAnalyser();
      analyser.minDecibels = -90;
      analyser.maxDecibels = -10;
      analyser.fftSize = 512;
      input.connect(analyser);
      const gainNode = ctx.createGain();
      input.connect(gainNode);
    
      const bufferLength = analyser.frequencyBinCount;
      const freqArray = new Uint8Array(bufferLength);
      const waveArray = new Uint8Array(bufferLength);
    
      const canvasCtx = canvas.getContext('2d');
      const WIDTH = canvas.width;
      const HEIGHT = canvas.height;
      canvasCtx.lineWidth = 2;
    
      draw();
      // taken from https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/maxDecibels#Example
      function draw() {
        requestAnimationFrame(draw);
    
        canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
        analyser.getByteFrequencyData(freqArray);
    
        gainNode.gain.value = 1; // un-mute the optional output (muted below while the signal is out of range)
        analyser.getByteTimeDomainData(waveArray);
    
        var barWidth = (WIDTH / bufferLength) * 2.5;
        var barHeight;
        var x = 0;
    
        for (var i = 0; i < bufferLength; i++) {
          barHeight = freqArray[i];
    
          canvasCtx.fillStyle = 'rgb(' + (barHeight + 100) + ',50,50)';
          canvasCtx.fillRect(x, HEIGHT - barHeight / 2, barWidth, barHeight / 2);
    
          x += barWidth + 1;
        }
        // here we check if the volume is in bounds
        if (freqArray.some(isTooHigh) || !freqArray.some(hasValue)) {
          canvasCtx.fillRect(0, HEIGHT / 2, WIDTH, 1);
          gainNode.gain.value = 0;
          return;
        }
    
        canvasCtx.beginPath();
        var sliceWidth = WIDTH * 1.0 / bufferLength;
        var x = 0;
        for (var i = 0; i < bufferLength; i++) {
          var v = waveArray[i] / 128.0;
          var y = v * HEIGHT / 2;
          if (i === 0) {
            canvasCtx.moveTo(x, y);
          } else {
            canvasCtx.lineTo(x, y);
          }
          x += sliceWidth;
        }
    
        canvasCtx.lineTo(canvas.width, canvas.height / 2);
        canvasCtx.stroke();
    
      };
    
      function isTooHigh(val) {
        return val === 255;
      }
    
      function hasValue(val) {
        return val;
      }
      // DOM
      maxDB.oninput = e => {
        const max = +maxDB.value;
        if (+minDB.value >= max) minDB.value = analyser.minDecibels = max - 1;
        analyser.maxDecibels = max;
      }
      minDB.oninput = e => {
        const min = +minDB.value;
        if (+maxDB.value <= min) maxDB.value = analyser.maxDecibels = min + 1;
        analyser.minDecibels = min;
      }
      out.onchange = e => {
        if (out.checked)
          gainNode.connect(ctx.destination);
        else
          gainNode.disconnect(ctx.destination);
      };
    
    })();
    
    function loadFileAsBufferNode(ctx, url = 'https://dl.dropboxusercontent.com/s/8c9m92u1euqnkaz/GershwinWhiteman-RhapsodyInBluePart1.mp3') {
      return fetch(url)
        .then(r => r.arrayBuffer())
        .then(buf => ctx.decodeAudioData(buf))
        .then(audioBuffer => {
          const source = ctx.createBufferSource();
          source.buffer = audioBuffer;
          source.loop = true; // keep the demo file playing in a loop
          source.start(0);
          return source;
        });
    };
    
    /* for Safari */
    function polyfill() {
      window.AudioContext = window.AudioContext || window.webkitAudioContext;
      try {
        const prom = new AudioContext().decodeAudioData(new ArrayBuffer()).catch(e => {});
      } catch (e) {
        const prev = AudioContext.prototype.decodeAudioData;
        Object.defineProperty(AudioContext.prototype, 'decodeAudioData', {
          get: () => asPromise
        });
    
        function asPromise(audioBuffer, done, failed) {
          return new Promise((res, rej) => {
            prev.apply(this, [audioBuffer, onsuccess, onerror]);
            function onsuccess(buf) {
              if (typeof done === 'function') done(buf);
              res(buf);
            }
            function onerror(err) {
              if (typeof failed === 'function') failed(err);
              rej(err);
            }
          });
        }
      }
    }
    <label>min<input type="range" id="minDB" min="-100" max="-1" value="-90"></label>
    <label>max<input type="range" id="maxDB" min="-99" max="0" value="-10"></label>
    <label>output audio<input type="checkbox" id="out"></label>
    <canvas id="canvas"></canvas>
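
    To try the same approach against your microphone instead of the demo file, you could swap loadFileAsBufferNode for a MediaStreamAudioSourceNode. A rough sketch (the threshold values are only examples, and some browsers will also require a user gesture plus ctx.resume() before the context runs):

    (async () => {
      const ctx = new AudioContext();
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const input = ctx.createMediaStreamSource(stream);
    
      const analyser = ctx.createAnalyser();
      analyser.fftSize = 512;
      analyser.minDecibels = -60; // example: treat anything quieter than -60 dB as silence
      analyser.maxDecibels = -10;
      input.connect(analyser);
    
      const freqArray = new Uint8Array(analyser.frequencyBinCount);
      const waveArray = new Uint8Array(analyser.frequencyBinCount);
    
      (function check() {
        requestAnimationFrame(check);
        analyser.getByteFrequencyData(freqArray);
        // all-zero frequency data means every bin was below minDecibels
        if (!freqArray.some(v => v > 0)) {
          return; // too quiet, skip drawing this frame
        }
        analyser.getByteTimeDomainData(waveArray);
        // ...draw waveArray exactly as in the snippet above
      })();
    })();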