Recording Audio with WebRTC


Approach 1

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <title>Document</title>
</head>

<body>
    <audio id="local-audio" src="" autoplay controls>播放麦克风捕获的声音</audio>
    <div>
        <button id="start">录音</button>
    </div>
    <div>
        <button id="stop">停止录音</button>
    </div>
    <script>

        const leftDataList = [], rightDataList = [];
        // Actual capture rate; updated from the AudioContext when recording
        // starts (browsers often run at 48000 Hz rather than 44100)
        let captureSampleRate = 44100;


        document.getElementById('start').addEventListener('click', () => {
            window.navigator.mediaDevices.getUserMedia({
                audio: true
            }).then(stream => {
                let audioContext = new AudioContext();
                // Everything in the Web Audio API happens inside an AudioContext,
                // so create one before doing anything else
                captureSampleRate = audioContext.sampleRate;
                // Create a MediaStreamAudioSourceNode to wrap the MediaStream,
                // which may come from the local microphone or another source
                const source = audioContext.createMediaStreamSource(stream);
                // createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels)
                // creates a ScriptProcessorNode for processing audio directly in JavaScript
                // bufferSize: buffer size in sample frames; controls how often the
                // audioprocess event fires and how many frames each call processes
                // numberOfInputChannels: integer count of input channels (default 2, max 32)
                // numberOfOutputChannels: integer count of output channels (default 2, max 32)
                const processor = audioContext.createScriptProcessor(4096, 2, 2);

                // The processor must be connected to the destination (speakers),
                // or the audioprocess callback never fires; since we never write
                // to outputBuffer, nothing is audibly played
                processor.connect(audioContext.destination);

                // Connect the source to the processor
                source.connect(processor);

                processor.onaudioprocess = function (event) {
                    console.log(event)

                    // Process this chunk of the audio stream
                    let audioBuffer = event.inputBuffer;
                    // Left channel
                    let leftChannelData = audioBuffer.getChannelData(0);
                    // Right channel
                    let rightChannelData = audioBuffer.getChannelData(1);
                    leftDataList.push([...leftChannelData]);
                    rightDataList.push([...rightChannelData]);

                };
                // Stop the tracks after 5 seconds; this also removes the
                // red recording indicator from the browser tab
                setTimeout(() => {
                    console.clear()
                    processor.onaudioprocess = null
                    stream.getTracks().forEach(track => {
                        if (track.readyState === 'live') {
                            track.stop()
                        }
                    })
                }, 5000)
            }).catch(err => {
                console.log(err);
            });
        })

        document.getElementById('stop').addEventListener('click', () => {

            // Flatten one channel's list of per-callback chunks into a single Float32Array
            const mergeArray = (list) => {
                let length = list.length * list[0].length;
                let data = new Float32Array(length),
                    offset = 0;
                for (let i = 0; i < list.length; i++) {
                    data.set(list[i], offset);
                    offset += list[i].length;
                }
                return data;
            }

            // Interleave the left and right samples (L, R, L, R, ...)
            const interleaveLeftAndRight = (left, right) => {
                let totalLength = left.length + right.length;
                let data = new Float32Array(totalLength);
                for (let i = 0; i < left.length; i++) {
                    let k = i * 2;
                    data[k] = left[i];
                    data[k + 1] = right[i];
                }
                return data;
            }

            const writeUTFBytes = (view, offset, string) => {
                const lng = string.length;
                for (let i = 0; i < lng; i++) {
                    view.setUint8(offset + i, string.charCodeAt(i));
                }
            }

            const createWavFile = (audioData) => {
                // Write the 44-byte WAV header
                const WAV_HEAD_SIZE = 44;
                let buffer = new ArrayBuffer(audioData.length * 2 + WAV_HEAD_SIZE),
                    view = new DataView(buffer);
                writeUTFBytes(view, 0, 'RIFF');
                // RIFF chunk size = total file size minus the 8-byte RIFF header
                view.setUint32(4, 36 + audioData.length * 2, true);
                writeUTFBytes(view, 8, 'WAVE');
                // 'fmt ' sub-chunk
                writeUTFBytes(view, 12, 'fmt ');
                view.setUint32(16, 16, true); // fmt chunk size: 16 for PCM
                view.setUint16(20, 1, true); // audio format: 1 = PCM
                view.setUint16(22, 2, true); // channel count: 2
                view.setUint32(24, captureSampleRate, true); // sample rate
                // byte rate = sampleRate * channels * bytesPerSample
                view.setUint32(28, captureSampleRate * 2 * 2, true);
                view.setUint16(32, 2 * 2, true); // block align = channels * bytesPerSample
                view.setUint16(34, 16, true); // bits per sample
                // 'data' sub-chunk
                writeUTFBytes(view, 36, 'data');
                view.setUint32(40, audioData.length * 2, true); // PCM data size in bytes

                // Write the PCM samples, scaling floats in [-1, 1] to 16-bit ints
                let length = audioData.length;
                let index = 44;
                let volume = 1;
                for (let i = 0; i < length; i++) {
                    view.setInt16(index, audioData[i] * (0x7FFF * volume), true);
                    index += 2;
                }
                return buffer;
            }


            let leftData = mergeArray(leftDataList),
                rightData = mergeArray(rightDataList);
            // Interleave the left and right channels
            let allData = interleaveLeftAndRight(leftData, rightData);
            let wavBuffer = createWavFile(allData);

            // Wrap the WAV buffer in a Blob
            let blob = new Blob([new Uint8Array(wavBuffer)], { type: 'audio/wav' });

            // Play the result back through the <audio> element (returning the
            // blob from a click handler would discard it)
            document.getElementById('local-audio').src = URL.createObjectURL(blob);
        })
    </script>
</body>

</html>

Drawbacks

  • Some of the APIs used here, notably ScriptProcessorNode, are deprecated and may break in the future (see the AudioWorklet sketch below)

  • The code is verbose
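
The non-deprecated replacement for ScriptProcessorNode is AudioWorkletNode. Below is a minimal sketch of the same PCM capture using a worklet, reusing the leftDataList/rightDataList arrays from the example above; the processor name 'pcm-capture' and the inline-module trick are our own choices, not part of any library:

// The processor runs on the audio thread; registerProcessor and
// AudioWorkletProcessor exist only inside the worklet's global scope
const workletSource = `
class PcmCapture extends AudioWorkletProcessor {
    process(inputs) {
        // inputs[0] holds one Float32Array per channel of the first input
        if (inputs[0].length > 0) {
            this.port.postMessage(inputs[0].map(channel => channel.slice()));
        }
        return true; // keep the processor alive
    }
}
registerProcessor('pcm-capture', PcmCapture);
`;

navigator.mediaDevices.getUserMedia({ audio: true }).then(async (stream) => {
    const audioContext = new AudioContext();
    // Load the processor from an inline Blob so the sketch is self-contained
    const moduleUrl = URL.createObjectURL(
        new Blob([workletSource], { type: 'application/javascript' }));
    await audioContext.audioWorklet.addModule(moduleUrl);

    const source = audioContext.createMediaStreamSource(stream);
    const capture = new AudioWorkletNode(audioContext, 'pcm-capture');
    source.connect(capture);

    // Unlike ScriptProcessorNode, the worklet does not need to be wired to
    // the destination; samples arrive on the message port instead
    capture.port.onmessage = (event) => {
        const [left, right] = event.data;
        leftDataList.push(left);
        if (right) rightDataList.push(right);
    };
});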

Approach 2

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <title>Document</title>
</head>

<body>
    <audio id="local-audio" src="" autoplay controls>播放麦克风捕获的声音</audio>
    <div>
        <button id="start">录音</button>
    </div>
    <div>
        <button id="stop">停止录音</button>
    </div>
    <script>

        // First, get the audio stream (note: this example starts recording as
        // soon as the page loads; the buttons above are not wired up)
        navigator.mediaDevices.getUserMedia({ audio: true })
            .then(function (stream) {
                // Create a MediaRecorder to record the audio
                const mediaRecorder = new MediaRecorder(stream);
                const chunks = [];

                // Buffer each dataavailable payload into the chunks array
                mediaRecorder.addEventListener("dataavailable", function (event) {
                    chunks.push(event.data);
                });

                // When recording stops, merge the chunks into a single Blob.
                // Note: MediaRecorder generally cannot encode MP3 in browsers,
                // so use the recorder's actual mimeType (typically WebM/Opus
                // in Chrome) instead of mislabeling the data as audio/mpeg
                mediaRecorder.addEventListener("stop", function () {
                    const audioBlob = new Blob(chunks, { type: mediaRecorder.mimeType });

                    // Convert the Blob to an object URL
                    const audioUrl = URL.createObjectURL(audioBlob);

                    // Attach the URL to an <a> element so the user can download
                    // the recording; the extension should match the container
                    const audioLink = document.createElement("a");
                    audioLink.href = audioUrl;
                    audioLink.download = "recorded_audio.webm";
                    document.body.appendChild(audioLink);
                    audioLink.click();
                });

                // Start recording
                mediaRecorder.start();

                // Stop recording after 5 seconds and release the tracks
                // (track.stop() also clears the tab's recording indicator)
                setTimeout(function () {
                    mediaRecorder.stop();
                    stream.getTracks().forEach(track => {
                        if (track.readyState === 'live') {
                            track.stop()
                        }
                    })
                }, 5000);
            })
            .catch(function (error) {
                console.log("Error:", error);
            });

    </script>
</body>

</html>
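
If you need control over the container and codec, MediaRecorder.isTypeSupported lets you probe what the browser can actually encode before constructing the recorder. A small sketch (the candidate list and its ordering are our own choices):

navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
    const candidates = [
        'audio/webm;codecs=opus', // Chrome, Edge, Firefox
        'audio/mp4',              // Safari
        'audio/ogg;codecs=opus'
    ];
    const mimeType = candidates.find((t) => MediaRecorder.isTypeSupported(t));
    // Fall back to the browser default if nothing in the list is supported
    const recorder = new MediaRecorder(stream, mimeType ? { mimeType } : {});
    console.log('Recording with container:', recorder.mimeType);
});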

Approach 3

  • Uses the plugin "mic-recorder-to-mp3": "^2.2.2"

  • yarn add mic-recorder-to-mp3

  • This is the approach used in our project: the device that plays the audio needs a recording with a 128 kbps bitrate

  • Microphone access requires a secure context: one of https://, localhost, or file:// (a minimal guard is sketched below)

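A minimal guard for the secure-context requirement, using the standard window.isSecureContext property:

// Warn early if the page can never be granted microphone access
if (!window.isSecureContext) {
    console.warn('Recording requires https://, localhost, or file://')
}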

import MicRecorder from 'mic-recorder-to-mp3'

    // Initialize the recorder
    initRecorder() {
      //   if (location.protocol !== 'https:') {
      //     this.$message.info('The speaker requires HTTPS')
      //     return
      //   }
      const mp3recorder = new MicRecorder({ bitRate: 128 })
      this.recorder = mp3recorder
    },
    // Start recording
    startRecord() {
      if (!this.recorder) return
      this.recorder
        .start()
        .then(() => {
          this.$message.info('Recording started')
        })
        .catch((error) => {
          console.error('Failed to start recording:', error)
        })
    },
    // Stop recording
    stopRecord() {
      if (!this.recorder) return
      this.recorder
        .stop()
        .getMp3()
        .then(([buffer, blob]) => {
          // getMp3() resolves with a [buffer, blob] pair
          console.log(blob)
          this.$message.info('Recording complete')
        })
        .catch((error) => {
          console.error('Failed to get the MP3:', error)
        })
        .finally(() => {
          console.log('Recording stopped')
        })
    },
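
Once getMp3() resolves, the blob can be handed to whatever backend the playback device pulls audio from. A hedged sketch of that hand-off as another method, in which the endpoint /api/audio and the field name file are placeholders rather than anything the plugin provides:

    // Hypothetical upload of the recorded MP3; adjust the URL and field
    // name to match your backend
    uploadRecording(blob) {
      const formData = new FormData()
      formData.append('file', new File([blob], 'recording.mp3', { type: 'audio/mp3' }))
      return fetch('/api/audio', { method: 'POST', body: formData })
        .then((res) => console.log('upload status:', res.status))
    },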

Summary

  • The APIs used in Approach 2 are not deprecated

  • While recording, the browser shows a red dot on the tab; clear it in code when you stop (track.stop()), or it stays on

  • Approach 3 has worked well in practice
