<!-- 语音识别 / 录音组件 (speech-recognition & audio-recorder component) -->
<template>
    <div class="audio-recorder-container">
        <!-- Recording controls: start/stop button plus live status/timer -->
        <div class="recorder-controls">
            <el-button type="primary" @click="toggleRecording" :disabled="isUnsupported" :loading="isProcessing">
                <template #icon>
                    <el-icon>
                        <Microphone />
                    </el-icon>
                </template>
                {{ isRecording ? '停止录音' : '开始录音' }}
            </el-button>

            <div class="recording-info">
                <span v-if="isRecording" class="recording-indicator">
                    <span class="pulse"></span> 录音中...
                </span>
                <span v-if="isRecording" class="recording-time">
                    {{ formatTime(recordingTime) }}
                </span>
            </div>
        </div>

        <!-- Live waveform display, driven by drawWave() while recording -->
        <div class="wave-display" :class="{ active: isRecording }">
            <canvas ref="waveCanvas"></canvas>
        </div>

        <!-- Recording history list (newest first; index keys are acceptable
             here only because items are removed via splice, not reordered) -->
        <div class="recordings-list" v-if="recordings.length > 0">
            <h3>录音历史 ({{ recordings.length }})</h3>
            <div class="recording-item" v-for="(recording, index) in recordings" :key="index">
                <div class="item-header">
                    <span class="item-index">#{{ index + 1 }}</span>
                    <span class="item-time">{{ recording.time }}</span>
                    <span class="item-duration">{{ formatTime(recording.duration) }}</span>
                </div>
                <div class="item-content">
                    <audio controls :src="recording.url"></audio>
                    <div class="item-transcript" v-if="recording.transcript">{{ recording.transcript }}</div>
                    <div class="item-actions">
                        <el-button size="small" @click="playRecording(index)" :disabled="isPlaying">
                            <el-icon>
                                <VideoPlay />
                            </el-icon>
                        </el-button>
                        <el-button size="small" @click="deleteRecording(index)" type="danger">
                            <el-icon>
                                <Delete />
                            </el-icon>
                        </el-button>
                    </div>
                </div>
            </div>
        </div>

        <!-- Shown when the browser lacks getUserMedia / Web Audio support -->
        <div v-if="isUnsupported" class="browser-warning">
            <el-alert title="浏览器不支持" type="warning" description="您的浏览器不支持音频录制功能,请使用最新版Chrome或Edge浏览器" show-icon
                closable />
        </div>

        <!-- Dismissible error banner; clearing resets the error state -->
        <div v-if="error" class="error-message">
            <el-alert :title="error" type="error" show-icon closable @close="error = null" />
        </div>
    </div>
</template>

<script setup>
import { ref, onUnmounted, onMounted } from 'vue'
import { Microphone, VideoPlay, Delete, Timer } from '@element-plus/icons-vue'
import RecordRTC from 'recordrtc'
import useSpeechRecognition from '@/utils/useSpeechRecognition'
const emit = defineEmits(['recording-saved'])

// --- Recording internals (non-reactive; module-scoped handles) ---
let audioContext = null        // Web Audio context used for live analysis
let analyser = null            // AnalyserNode feeding the waveform display
let dataArray = null           // Uint8Array buffer for frequency data
let recorder = null            // RecordRTC instance
let stream = null              // MediaStream returned by getUserMedia
let recordingStartTime = 0     // Date.now() when recording started (ms)

// --- Waveform animation state ---
const waveCanvas = ref(null)   // template ref to the <canvas> element
let ctx = null                 // canvas 2D rendering context
let animationFrameId = null    // requestAnimationFrame handle for drawWave

// --- Reactive UI state ---
const isRecording = ref(false)   // true while actively capturing audio
const isUnsupported = ref(false) // true when required browser APIs are missing
const isProcessing = ref(false)  // true during start/stop transitions
const isPlaying = ref(false)     // true while a saved recording is playing
const error = ref(null)          // user-visible error message, or null
const recordingTime = ref(0)     // elapsed recording time in whole seconds
let timeInterval = null          // setInterval handle for the timer display

// Recording history: entries of shape { url, blob, duration, transcript, time }
const recordings = ref([])

/**
 * Feature-detect the APIs this component depends on (getUserMedia,
 * Web Audio, and the RecordRTC library) and flip `isUnsupported`.
 */
const checkBrowserSupport = () => {
    const hasMediaDevices = Boolean(navigator.mediaDevices)
    const hasAudioContext = Boolean(window.AudioContext || window.webkitAudioContext)
    const hasRecorder = Boolean(RecordRTC)
    isUnsupported.value = !(hasMediaDevices && hasAudioContext && hasRecorder)
}

/**
 * Resize the waveform canvas to fill its container's width at a fixed
 * 46px height. Called on mount and on every window resize.
 */
const resizeCanvas = () => {
    const canvas = waveCanvas.value
    if (!canvas) return
    canvas.width = canvas.parentElement.clientWidth
    canvas.height = 46
}

/**
 * Format a duration in seconds as "MM:SS". Minutes are not capped, so an
 * hour-plus duration renders as e.g. "75:30".
 *
 * @param {number} seconds - duration in seconds (may be fractional)
 * @returns {string} zero-padded "MM:SS" string
 */
const formatTime = (seconds) => {
    const pad = (n) => String(n).padStart(2, '0')
    return `${pad(Math.floor(seconds / 60))}:${pad(Math.floor(seconds % 60))}`
}

/**
 * Refresh `recordingTime` with the whole seconds elapsed since the
 * recording session began.
 */
const updateRecordingTime = () => {
    const elapsedMs = Date.now() - recordingStartTime
    recordingTime.value = Math.floor(elapsedMs / 1000)
}

/**
 * Simulated speech recognition (a real project would upload `blob` to a
 * speech-to-text API here).
 *
 * @param {Blob} blob - recorded audio data (unused by this mock)
 * @param {number} [delayMs=1500] - simulated processing latency in ms;
 *        parameterized (was hard-coded) so callers/tests can shorten it
 * @returns {Promise<string>} a randomly chosen mock transcript
 */
const recognizeSpeech = async (blob, delayMs = 1500) => {
    return new Promise((resolve) => {
        setTimeout(() => {
            // Mock recognition results.
            const mockResponses = [
                "这是一个测试录音内容",
                "今天天气真好",
                "请记录我的会议笔记",
                "Vue3的录音功能实现",
                "语音识别技术展示"
            ]
            const randomText = mockResponses[Math.floor(Math.random() * mockResponses.length)]
            resolve(randomText)
        }, delayMs)
    })
}

/**
 * Mount hook: feature-detect, set up the waveform canvas + resize listener,
 * and restore previously saved recordings from localStorage.
 */
onMounted(() => {
    checkBrowserSupport()

    if (waveCanvas.value) {
        ctx = waveCanvas.value.getContext('2d')
        resizeCanvas()
        window.addEventListener('resize', resizeCanvas)
    }

    // Restore the recording history. BUG FIX: JSON.parse was unguarded, so a
    // corrupt localStorage entry threw and broke component mounting.
    // NOTE(review): blob: URLs and Blob objects do not survive a reload, so
    // restored entries effectively keep only metadata/transcripts — playback
    // of restored items will fail; consider IndexedDB for the audio itself.
    const savedRecordings = localStorage.getItem('audioRecordings')
    if (savedRecordings) {
        try {
            const parsed = JSON.parse(savedRecordings)
            if (Array.isArray(parsed)) {
                recordings.value = parsed
            }
        } catch (e) {
            console.error('读取录音记录失败:', e)
            localStorage.removeItem('audioRecordings')
        }
    }
})

/**
 * Unmount hook: stop any in-progress recording, detach the resize listener,
 * and clear the elapsed-time ticker.
 */
onUnmounted(() => {
    stopRecording()
    window.removeEventListener('resize', resizeCanvas)
    if (timeInterval !== null) {
        clearInterval(timeInterval)
    }
})

/**
 * Persist the recording history to localStorage under 'audioRecordings'.
 * NOTE(review): Blob objects serialize to {} and blob: URLs go stale after a
 * reload, so only metadata/transcripts meaningfully persist — confirm this
 * is the intended behavior.
 */
const saveRecordingsToLocal = () => {
    const serialized = JSON.stringify(recordings.value)
    localStorage.setItem('audioRecordings', serialized)
}

/**
 * Toggle between starting and stopping a recording session, based on the
 * current `isRecording` state.
 */
const toggleRecording = async () => {
    const action = isRecording.value ? stopRecording : startRecording
    await action()
}

/**
 * Start capturing microphone audio.
 *
 * Acquires a MediaStream, wires a Web Audio analyser for the waveform
 * display, creates a RecordRTC recorder, and starts the timer and the
 * animation loop. On failure, sets `error` and releases any
 * partially-acquired resources.
 */
const startRecording = async () => {
    if (isUnsupported.value) {
        error.value = '当前浏览器不支持录音功能'
        return
    }

    try {
        error.value = null
        isProcessing.value = true
        recordingStartTime = Date.now()
        recordingTime.value = 0

        // Request a mono microphone stream with browser-side cleanup filters.
        stream = await navigator.mediaDevices.getUserMedia({
            audio: {
                echoCancellation: true,
                noiseSuppression: true,
                autoGainControl: true,
                channelCount: 1
            }
        })

        // Feed the stream into an analyser so drawWave() can visualize it.
        audioContext = new (window.AudioContext || window.webkitAudioContext)()
        const source = audioContext.createMediaStreamSource(stream)
        analyser = audioContext.createAnalyser()
        analyser.fftSize = 256
        source.connect(analyser)
        dataArray = new Uint8Array(analyser.frequencyBinCount)

        // Dispose of any recorder left over from a previous session.
        if (recorder) {
            recorder.destroy()
            recorder = null
        }

        // 16 kHz mono WAV — a common input format for speech recognition.
        recorder = new RecordRTC(stream, {
            type: 'audio',
            mimeType: 'audio/wav',
            recorderType: RecordRTC.StereoAudioRecorder,
            numberOfAudioChannels: 1,
            sampleRate: 16000,
            desiredSampRate: 16000,
            disableLogs: true,
            timeSlice: 100,
            ondataavailable: (blob) => {
                // Hook for streaming real-time audio chunks if needed.
            }
        })

        recorder.startRecording()
        isRecording.value = true
        isProcessing.value = false

        // Tick the elapsed-time display once per second.
        timeInterval = setInterval(updateRecordingTime, 1000)

        // Kick off the waveform animation loop.
        drawWave()

    } catch (err) {
        console.error('录音启动失败:', err)
        error.value = `无法访问麦克风: ${err.message}`
        isProcessing.value = false

        // BUG FIX: the previous code called stopRecording() here, but that
        // function early-returns while isRecording is still false, so the
        // stream/audioContext/recorder acquired above leaked. Release them
        // directly instead.
        if (stream) {
            stream.getTracks().forEach(track => track.stop())
            stream = null
        }
        if (audioContext && audioContext.state !== 'closed') {
            await audioContext.close().catch(e => console.error('关闭音频上下文失败', e))
        }
        audioContext = null
        if (recorder) {
            recorder.destroy()
            recorder = null
        }
        analyser = null
        dataArray = null
    }
}

/**
 * Stop recording: collect the captured audio blob, run (mock) speech
 * recognition, and prepend the result to the recordings list. The finally
 * block always releases the media stream, audio context, and recorder, and
 * resets the recording/processing flags.
 */
const stopRecording = async () => {
    if (!isRecording.value) return

    isProcessing.value = true

    try {
        // Stop the elapsed-time ticker.
        if (timeInterval) {
            clearInterval(timeInterval)
            timeInterval = null
        }

        // Stop the waveform animation loop.
        if (animationFrameId) {
            cancelAnimationFrame(animationFrameId)
            animationFrameId = null
        }

        let blob = null
        let duration = recordingTime.value

        // Stop the recorder and collect its data. The Promise wrapper adapts
        // RecordRTC's callback-style stopRecording() to async/await.
        if (recorder) {
            await new Promise((resolve) => {
                recorder.stopRecording(() => {
                    try {
                        if (recorder) {
                            blob = recorder.getBlob()
                            console.log('录音完成,时长:', duration, '秒')

                            // Object URL used by the <audio> element.
                            const url = URL.createObjectURL(blob)

                            // Recognition runs asynchronously: resolve() below
                            // fires first, and the list entry is added once
                            // the (mock) transcript arrives.
                            recognizeSpeech(blob).then(text => {

                                // Prepend the newest recording to the history.
                                const newRecording = {
                                    url,
                                    blob,
                                    duration,
                                    transcript: text,
                                    time: new Date().toLocaleString()
                                }

                                recordings.value.unshift(newRecording)
                                saveRecordingsToLocal()
                                emit('recording-saved', newRecording)
                            })
                        }
                        resolve()
                    } catch (e) {
                        // Resolve even on failure so the await above cannot
                        // hang; surface the problem via the error banner.
                        console.error('获取录音数据失败:', e)
                        error.value = '获取录音数据失败'
                        resolve()
                    }
                })
            })
        }

    } catch (err) {
        console.error('录音停止错误:', err)
        error.value = `录音停止时出错: ${err.message}`
    } finally {
        // Release hardware/audio resources regardless of success or failure.
        if (stream) {
            stream.getTracks().forEach(track => track.stop())
            stream = null
        }

        if (audioContext && audioContext.state !== 'closed') {
            await audioContext.close().catch(err => console.error('关闭音频上下文失败', err))
        }

        if (recorder) {
            recorder.destroy()
            recorder = null
        }

        analyser = null
        dataArray = null
        isRecording.value = false
        isProcessing.value = false
    }
}

/**
 * Play a saved recording through a detached Audio element.
 *
 * @param {number} index - position of the recording in the history list
 */
const playRecording = (index) => {
    if (isPlaying.value) return

    const recording = recordings.value[index]
    if (!recording) return

    isPlaying.value = true
    const audio = new Audio(recording.url)
    const reset = () => {
        isPlaying.value = false
    }

    // BUG FIX: previously only `onended` reset isPlaying, so a decode/network
    // failure (e.g. a stale blob: URL restored from localStorage) left
    // isPlaying stuck at true, permanently disabling every play button. The
    // play() promise rejection was also unhandled.
    audio.onended = reset
    audio.onerror = reset
    audio.play().catch((err) => {
        console.error('播放失败:', err)
        reset()
    })
}

/**
 * Remove a recording from the history, revoking its object URL first so
 * the underlying blob memory can be reclaimed.
 *
 * @param {number} index - position of the recording in the history list
 */
const deleteRecording = (index) => {
    const target = recordings.value[index]
    if (!target) return

    URL.revokeObjectURL(target.url)
    recordings.value.splice(index, 1)
    saveRecordingsToLocal()
}

/**
 * Render one frame of the audio visualization onto the canvas: a primary
 * sine-based waveform, a fainter secondary waveform, and a frequency-bar
 * strip along the bottom — all amplitude-modulated by the live analyser
 * data. Reschedules itself via requestAnimationFrame while recording.
 */
const drawWave = () => {
    // Bail out unless the canvas, analyser pipeline, and recording state
    // are all ready — also the exit condition for the animation loop.
    if (!ctx || !waveCanvas.value || !dataArray || !analyser || !isRecording.value) return

    const { width, height } = waveCanvas.value

    // Clear the previous frame.
    ctx.clearRect(0, 0, width, height)

    // Horizontal gradient shared by the waveform stroke and the bars.
    const gradient = ctx.createLinearGradient(0, 0, width, 0)
    gradient.addColorStop(0, '#6366f1') // indigo-500
    gradient.addColorStop(0.5, '#a855f7') // purple-500
    gradient.addColorStop(1, '#8b5cf6') // violet-500

    // Pull the latest frequency data into dataArray (values 0-255).
    analyser.getByteFrequencyData(dataArray)

    // Mean volume drives the overall waveform amplitude (floored at 0.5 so
    // the display keeps moving during near-silence).
    const average = dataArray.reduce((acc, val) => acc + val, 0) / dataArray.length
    const volumeMultiplier = Math.max(0.5, average / 128)

    // Primary waveform.
    ctx.strokeStyle = gradient
    ctx.lineWidth = 3
    ctx.beginPath()

    // Combine a time-based phase with the audio data for a lively curve.
    const time = Date.now() * 0.05 / 1000
    for (let x = 0; x < width; x++) {
        // Map the x pixel to an index into the frequency data.
        const dataIndex = Math.floor((x / width) * dataArray.length)
        // Normalized audio intensity (0-1) at this position.
        const audioFactor = dataArray[dataIndex] / 255

        // Two superimposed sines, scaled by volume and local intensity.
        const y = height / 2 +
            Math.sin(x * 0.02 + time) * 20 * volumeMultiplier * (0.5 + audioFactor) +
            Math.sin(x * 0.03 + time * 1.3) * 10 * volumeMultiplier * audioFactor

        if (x === 0) {
            ctx.moveTo(x, y)
        } else {
            ctx.lineTo(x, y)
        }
    }
    ctx.stroke()

    // Secondary waveform: thinner, more transparent, drifting the other way.
    ctx.strokeStyle = 'rgba(192, 132, 252, 0.4)' // purple-400 with opacity
    ctx.lineWidth = 1.5
    ctx.beginPath()

    for (let x = 0; x < width; x++) {
        const dataIndex = Math.floor((x / width) * dataArray.length)
        const audioFactor = dataArray[dataIndex] / 255

        const y = height / 2 +
            Math.sin(x * 0.014 - time * 0.8) * 14 * volumeMultiplier * audioFactor

        if (x === 0) {
            ctx.moveTo(x, y)
        } else {
            ctx.lineTo(x, y)
        }
    }
    ctx.stroke()

    // Frequency bars along the bottom edge (volume indicator).
    const barWidth = 2
    const barGap = 3
    const barCount = Math.floor(width / (barWidth + barGap))
    const barMaxHeight = height * 0.7

    ctx.fillStyle = gradient

    for (let i = 0; i < barCount; i++) {
        const dataIndex = Math.floor((i / barCount) * dataArray.length)
        const barHeight = (dataArray[dataIndex] / 255) * barMaxHeight

        if (barHeight > 0) {
            // Rounded-rectangle bar, anchored to the bottom of the canvas.
            const x = i * (barWidth + barGap)
            const y = height - barHeight
            const radius = barWidth / 2

            ctx.beginPath()
            ctx.moveTo(x + radius, y)
            ctx.lineTo(x + barWidth - radius, y)
            ctx.quadraticCurveTo(x + barWidth, y, x + barWidth, y + radius)
            ctx.lineTo(x + barWidth, height - radius)
            ctx.quadraticCurveTo(x + barWidth, height, x + barWidth - radius, height)
            ctx.lineTo(x + radius, height)
            ctx.quadraticCurveTo(x, height, x, height - radius)
            ctx.lineTo(x, y + radius)
            ctx.quadraticCurveTo(x, y, x + radius, y)
            ctx.closePath()
            ctx.fill()
        }
    }

    // Schedule the next frame while recording continues.
    if (isRecording.value) {
        animationFrameId = requestAnimationFrame(drawWave)
    }
}
</script>

<style scoped>
/* Card-style wrapper for the whole recorder. */
.audio-recorder-container {
    width: 100%;
    max-width: 800px;
    margin: 0 auto;
    padding: 20px;
    border-radius: 8px;
    background: #fff;
    box-shadow: 0 2px 12px 0 rgba(0, 0, 0, 0.1);
}

/* Start/stop button row with status text. */
.recorder-controls {
    display: flex;
    align-items: center;
    gap: 20px;
    margin-bottom: 20px;
}

.recording-info {
    display: flex;
    align-items: center;
    gap: 15px;
    color: #666;
}

/* Red "recording" label with pulsing dot. */
.recording-indicator {
    display: flex;
    align-items: center;
    gap: 5px;
    color: #f56c6c;
}

.pulse {
    display: inline-block;
    width: 10px;
    height: 10px;
    border-radius: 50%;
    background: #f56c6c;
    animation: pulse 1.5s infinite;
}

@keyframes pulse {
    0% {
        transform: scale(0.95);
        opacity: 0.7;
    }

    50% {
        transform: scale(1.1);
        opacity: 1;
    }

    100% {
        transform: scale(0.95);
        opacity: 0.7;
    }
}

/* Waveform canvas container; dimmed while idle, highlighted when active. */
.wave-display {
    margin: 20px 0;
    border-radius: 4px;
    overflow: hidden;
    transition: all 0.3s;
    opacity: 0.7;
    background: #f5f7fa;
}

.wave-display.active {
    opacity: 1;
    box-shadow: 0 0 10px rgba(64, 158, 255, 0.3);
}

/* NOTE(review): .recording-content and .transcript-text are not referenced
   by the current template — confirm they are dead before removing. */
.recording-content {
    margin: 20px 0;
    padding: 15px;
    border-radius: 4px;
    background: #f5f7fa;
}

.recording-content h3 {
    margin-top: 0;
    margin-bottom: 10px;
    color: #409EFF;
}

.transcript-text {
    padding: 10px;
    background: white;
    border-radius: 4px;
    border-left: 3px solid #409EFF;
}

/* Recording history list and per-item cards. */
.recordings-list {
    margin-top: 30px;
}

.recordings-list h3 {
    margin-bottom: 15px;
    padding-bottom: 10px;
    border-bottom: 1px solid #ebeef5;
    color: #409EFF;
}

.recording-item {
    margin-bottom: 15px;
    padding: 15px;
    border-radius: 4px;
    background: #f5f7fa;
    transition: all 0.3s;
}

.recording-item:hover {
    background: #ebf0f5;
}

.item-header {
    display: flex;
    align-items: center;
    gap: 15px;
    margin-bottom: 10px;
    font-size: 14px;
    color: #666;
}

.item-index {
    font-weight: bold;
    color: #409EFF;
}

.item-content {
    display: flex;
    flex-direction: column;
    gap: 10px;
}

.item-transcript {
    padding: 10px;
    background: white;
    border-radius: 4px;
    font-size: 14px;
}

.item-actions {
    display: flex;
    justify-content: flex-end;
    gap: 5px;
}

audio {
    width: 100%;
    margin-top: 5px;
}

/* Alert banners (unsupported browser / runtime errors). */
.browser-warning,
.error-message {
    margin: 20px 0;
}

.el-button {
    min-width: 120px;
}
</style>