Audio Recording and Visualization in Vue (Desktop): A Code Walkthrough


Preface

Our project's chat feature needed speech-to-text support. There are two ways to do this:

  1. Use the browser's built-in speech recognition (a minimal sketch follows below)
  2. Record the audio and send the file to the backend for transcription (the approach used in this project)
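
For reference, option 1 can be sketched with the Web Speech API. Browser support varies (Chromium exposes the constructor with a webkit prefix; Firefox largely does not support it), so treat this as a rough sketch rather than production code:

```ts
// Rough sketch of option 1: the browser's built-in speech recognition.
// Assumes a Chromium-based browser, where the API is webkit-prefixed.
const SpeechRecognition =
  (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
const recognition = new SpeechRecognition();
recognition.lang = 'zh-CN'; // language to recognize
recognition.onresult = (event: any) => {
  // results[0][0] holds the best transcript for the first result
  console.log(event.results[0][0].transcript);
};
recognition.start();
```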

APIs used:

  1. Capture the audio stream: navigator.mediaDevices.getUserMedia
  2. Record the audio: MediaRecorder
  3. Audio visualization: AudioContext
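
Before walking through the full class, here is a minimal sketch (inside an async function, no error handling) of how the three APIs fit together:

```ts
// Minimal wiring of the three APIs: one stream feeds both
// the recorder and the analyser.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });

const recorder = new MediaRecorder(stream); // records the stream
recorder.addEventListener('dataavailable', (e) => {
  console.log('chunk:', e.data.size); // recorded data arrives in chunks
});

const audioCtx = new AudioContext();
const analyser = audioCtx.createAnalyser(); // exposes frequency data for drawing
audioCtx.createMediaStreamSource(stream).connect(analyser);

recorder.start();
```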

Code

The Media class

```ts
import { createApp, type App } from "vue"
import XwyaWave from "@/components/XwyaWave"
/**
 * app: the Vue app instance used to mount/unmount the visualization component
 * instance: exposes the Media `this` outside the class (the component calls back into it)
 * el: a div container created for mounting
 */
let app: App | null = null
let instance: Media | null = null
const el = document.createElement('div')

class Media {
  mediaRecorder: MediaRecorder | null = null; // records the audio
  errorHandle: ((err: Error) => void) | null = null; // error callback
  successHandle: ((data: Blob, url: string) => void) | null = null; // recording-finished callback
  audioChunks: BlobPart[] = []; // binary chunks of recorded audio
  audioContext: AudioContext | null = null; // audio context
  analyser: AnalyserNode | null = null; // audio analyser
  source: MediaStreamAudioSourceNode | null = null; // audio stream source node
  isVisualizing: boolean = false; // visualization flag
  flag: boolean = false; // whether to emit the result when recording stops (confirm vs. cancel)

  constructor() {
    instance = this
    this.initStream();
  }

  /**
   * Request the microphone audio stream,
   * create the MediaRecorder,
   * call onDataAvailable to start listening for recorded data,
   * call setupAudioProcessing to initialize the audio context,
   * and append el to the body.
   */
  private async initStream(): Promise<void> {
    try {
      const stream: MediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
      this.mediaRecorder = new MediaRecorder(stream);
      this.onDataAvailable();
      this.setupAudioProcessing(stream);
      document.body.appendChild(el);
    } catch (err) {
      if (this.errorHandle) {
        this.errorHandle(err as Error);
      }
    }
  }

  /**
   * Create the audio context,
   * wrap the stream in a source node and connect it into the audio graph,
   * create an analyser node for the live spectrum/waveform display,
   * feed the microphone stream into the analyser,
   * and set the FFT size to 2048.
   */
  private setupAudioProcessing(stream: MediaStream): void {
    this.audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
    this.source = this.audioContext.createMediaStreamSource(stream);
    this.analyser = this.audioContext.createAnalyser();
    this.source.connect(this.analyser);
    this.analyser.fftSize = 2048;
    this.isVisualizing = true; // start visualizing
  }

  /**
   * Listen for recorded data, push each chunk into audioChunks,
   * then call generateAudioBlob to process the data.
   */
  private onDataAvailable() {
    this.mediaRecorder?.addEventListener('dataavailable', (event) => {
      if (event.data.size > 0) {
        this.audioChunks.push(event.data);
        this.generateAudioBlob();
      }
    });
  }

  /**
   * Start recording,
   * enable visualization,
   * and mount the visualization component.
   */
  start() {
    this.mediaRecorder?.start();
    this.isVisualizing = true; // enable visualization
    app = createApp(XwyaWave, {
      isVisualizing: this.isVisualizing,
      analyser: this.analyser,
      v: instance
    })
    this.createWave()
  }

  /**
   * this.flag controls whether the result is emitted (confirm vs. cancel).
   * Stop recording,
   * stop visualization,
   * and unmount the visualization UI.
   */
  stop(f: boolean) {
    this.flag = f
    this.mediaRecorder?.stop();
    this.isVisualizing = false; // stop visualizing
    app && app.unmount()
  }

  /**
   * When recording ends, emit the binary data and an object URL.
   */
  private generateAudioBlob() {
    if (this.audioChunks.length > 0) {
      if (this.flag) {
        // Note: 'audio/wav' only labels the Blob; the actual encoding is
        // whatever the browser's MediaRecorder produced (usually webm/ogg).
        const blob = new Blob(this.audioChunks, { type: 'audio/wav' });
        const url = URL.createObjectURL(blob);
        if (this.successHandle) {
          this.successHandle(blob, url);
        }
      }
      this.audioChunks = []; // clear chunks so the next recording starts fresh
    }
  }

  /**
   * Register the success callback.
   */
  onSuccess(handle: (data: Blob, url: string) => void) {
    this.successHandle = handle;
  }

  /**
   * Register the error callback.
   */
  onError(handle: (err: any) => void): void {
    this.errorHandle = handle;
  }

  /**
   * Mount the visualization component.
   */
  private createWave() {
    app?.mount(el)
  }
}
export default Media;
```
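
One caveat worth noting: `new Blob(chunks, { type: 'audio/wav' })` only labels the Blob; the actual encoding is whatever container the browser's MediaRecorder chose. If the backend cares about the format, you can ask for a supported type explicitly. A sketch, where `stream` and `chunks` stand in for the values used in the class:

```ts
// Sketch: pick a container the browser actually supports instead of
// assuming WAV. `stream` is the MediaStream from getUserMedia.
const mimeType = MediaRecorder.isTypeSupported('audio/webm')
  ? 'audio/webm'
  : 'audio/ogg';
const recorder = new MediaRecorder(stream, { mimeType });
// ...later, label the Blob with the type the recorder actually used:
const blob = new Blob(chunks, { type: recorder.mimeType });
```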

The Vue visualization component

```vue
<script lang="ts" setup>
import { ref, onMounted, nextTick, onBeforeUnmount } from 'vue'
// defineProps is a compiler macro in <script setup>; it does not need to be imported
const props = defineProps({
  isVisualizing: {
    type: Boolean,
    default: false
  },
  analyser: {
    type: Object,
    default: null
  },
  v: {
    type: Object,
    default: null
  }
})

let animationFrameId: number | null = null
const cav = ref<HTMLCanvasElement | null>(null)
const draw = (ctx: CanvasRenderingContext2D, bufferLength: number, dataArray: Uint8Array) => {
  if (!props.isVisualizing) return; // stop when visualization is disabled
  props.analyser.getByteFrequencyData(dataArray);
  animationFrameId = requestAnimationFrame(() => { draw(ctx, bufferLength, dataArray) });
  ctx.clearRect(0, 0, cav.value!.width, cav.value!.height);
  const barWidth = 5
  let barHeight;
  let x = 0;
  for (let i = 0; i < bufferLength; i++) {
    barHeight = dataArray[i];
    ctx.fillStyle = 'rgb(255,255,255)';
    ctx.fillRect(x, cav.value!.height - barHeight / 2, barWidth, barHeight / 2);
    x += barWidth + 1;
  }
};
const start = () => {
  try {
    const ctx = cav.value!.getContext('2d')!;
    const bufferLength = props.analyser.frequencyBinCount
    const dataArray = new Uint8Array(bufferLength);
    draw(ctx, bufferLength, dataArray);
  } catch (error) {
    console.log(error);
    props.v.errorHandle?.(error)
    nextTick(() => {
      props.v.stop(false)
    })
  }
}
onMounted(() => {
  start()
})
onBeforeUnmount(() => {
  if (animationFrameId !== null) cancelAnimationFrame(animationFrameId)
})
</script>

<template>
  <Teleport to='body'>
    <div class="audio_wave_container">
      <div class="audio_wave_packing">
        <canvas id="wave" ref="cav"></canvas>
        <div class="audio_wave_text_container">
          <p class="audio_wave_confirm" @click="() => v.stop(true)">确认</p>
          <p class="audio_wave_cancel" @click="() => v.stop(false)">取消</p>
        </div>
      </div>
    </div>
  </Teleport>
</template>

<style lang="css" scoped>
.audio_wave_container {
  position: fixed;
  inset: 0;
  display: flex;
  justify-content: center;
  align-items: center;
  z-index: 50;
}

.audio_wave_packing {
  border-radius: 20px;
  box-sizing: border-box;
  padding: 20px;
  position: absolute;
  width: 180px;
  height: 180px;
  display: flex;
  flex-flow: column;
  background-color: rgba(0, 0, 0, .6);
}

#wave {
  flex: 1;
}

.audio_wave_text_container {
  padding: 20px 0 0 0;
  display: flex;
  justify-content: center;
  gap: 10px;
  color: #fff;
}
</style>
```
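
The class comment earlier mentions both spectrum and waveform display, while the component draws frequency bars via getByteFrequencyData. If you prefer an oscilloscope-style waveform, the same AnalyserNode also exposes time-domain samples. A sketch of an alternative draw body (drawWaveform is a hypothetical name):

```ts
// Waveform variant: time-domain samples instead of frequency bins.
// Values center around 128 when the input is silent.
const drawWaveform = (ctx: CanvasRenderingContext2D, analyser: AnalyserNode, canvas: HTMLCanvasElement) => {
  const dataArray = new Uint8Array(analyser.fftSize);
  analyser.getByteTimeDomainData(dataArray);
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  ctx.beginPath();
  const step = canvas.width / dataArray.length;
  dataArray.forEach((v, i) => {
    const y = (v / 255) * canvas.height;
    i === 0 ? ctx.moveTo(0, y) : ctx.lineTo(i * step, y);
  });
  ctx.strokeStyle = '#fff';
  ctx.stroke();
};
```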

Usage

```ts
const media = new Media()
media.onSuccess((blob, url) => {})
media.onError((err) => {})
media.start()
// stop(true) emits the recording via onSuccess; stop(false) discards it
media.stop(true)
```
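
Since the project's goal is to hand the recording to the backend for transcription, a typical onSuccess handler uploads the Blob with FormData. The endpoint and response shape below are hypothetical placeholders for your own backend:

```ts
// Hypothetical upload handler; '/api/speech-to-text' and the
// { text } response shape are placeholders.
media.onSuccess(async (blob, url) => {
  const form = new FormData();
  form.append('file', blob, 'recording.wav');
  const res = await fetch('/api/speech-to-text', { method: 'POST', body: form });
  const { text } = await res.json();
  console.log('transcription:', text);
  URL.revokeObjectURL(url); // free the object URL once done
});
```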