<template>
  <!-- Audio recorder: mic controls, live waveform canvas, and playback history. -->
  <div class="audio-recorder-container">
    <div class="recorder-controls">
      <el-button type="primary" @click="toggleRecording" :disabled="isUnsupported" :loading="isProcessing">
        <template #icon>
          <el-icon>
            <Microphone />
          </el-icon>
        </template>
        {{ isRecording ? '停止录音' : '开始录音' }}
      </el-button>
      <div class="recording-info">
        <span v-if="isRecording" class="recording-indicator">
          <span class="pulse"></span> 录音中...
        </span>
        <span v-if="isRecording" class="recording-time">
          {{ formatTime(recordingTime) }}
        </span>
      </div>
    </div>
    <div class="wave-display" :class="{ active: isRecording }">
      <canvas ref="waveCanvas"></canvas>
    </div>
    <div class="recordings-list" v-if="recordings.length > 0">
      <h3>录音历史 ({{ recordings.length }})</h3>
      <!-- NOTE(review): :key="index" is fragile because deleteRecording splices
           the array; a stable per-recording id would be safer. Kept as-is since
           recording objects currently have no unique id field. -->
      <div class="recording-item" v-for="(recording, index) in recordings" :key="index">
        <div class="item-header">
          <span class="item-index">#{{ index + 1 }}</span>
          <span class="item-time">{{ recording.time }}</span>
          <span class="item-duration">{{ formatTime(recording.duration) }}</span>
        </div>
        <div class="item-content">
          <audio controls :src="recording.url"></audio>
          <div class="item-transcript" v-if="recording.transcript">{{ recording.transcript }}</div>
          <div class="item-actions">
            <!-- a11y: icon-only buttons need an accessible name. -->
            <el-button size="small" @click="playRecording(index)" :disabled="isPlaying" aria-label="播放录音">
              <el-icon>
                <VideoPlay />
              </el-icon>
            </el-button>
            <el-button size="small" @click="deleteRecording(index)" type="danger" aria-label="删除录音">
              <el-icon>
                <Delete />
              </el-icon>
            </el-button>
          </div>
        </div>
      </div>
    </div>
    <div v-if="isUnsupported" class="browser-warning">
      <el-alert title="浏览器不支持" type="warning" description="您的浏览器不支持音频录制功能,请使用最新版Chrome或Edge浏览器" show-icon
        closable />
    </div>
    <div v-if="error" class="error-message">
      <el-alert :title="error" type="error" show-icon closable @close="error = null" />
    </div>
  </div>
</template>
<script setup>
// Audio recorder component: captures mic audio with RecordRTC, visualizes it
// via a Web Audio AnalyserNode, and keeps a playback history in localStorage.
import { ref, onUnmounted, onMounted } from 'vue'
// NOTE(review): Timer is imported but never used in this file.
import { Microphone, VideoPlay, Delete, Timer } from '@element-plus/icons-vue'
import RecordRTC from 'recordrtc'
// NOTE(review): useSpeechRecognition is imported but never used; the
// recognizeSpeech function below is a mock — confirm which one is intended.
import useSpeechRecognition from '@/utils/useSpeechRecognition'
// Emitted with the new recording object each time a recording is saved.
const emit = defineEmits(['recording-saved'])
// Non-reactive recording machinery (plain module vars; no template binding).
let audioContext = null
let analyser = null
let dataArray = null
let recorder = null
let stream = null
let recordingStartTime = 0
// Waveform canvas element ref, its 2D context, and the rAF handle.
const waveCanvas = ref(null)
let ctx = null
let animationFrameId = null
// Reactive UI state.
const isRecording = ref(false)
const isUnsupported = ref(false)
const isProcessing = ref(false)
const isPlaying = ref(false)
const error = ref(null)
// Elapsed recording time in whole seconds (driven by timeInterval below).
const recordingTime = ref(0)
let timeInterval = null
// History of { url, blob, duration, transcript, time } recording objects.
const recordings = ref([])
const checkBrowserSupport = () => {
  // Recording requires getUserMedia, the Web Audio API, and RecordRTC.
  const hasMediaDevices = Boolean(navigator.mediaDevices)
  const hasAudioContext = Boolean(window.AudioContext || window.webkitAudioContext)
  const hasRecordRTC = Boolean(RecordRTC)
  isUnsupported.value = !hasMediaDevices || !hasAudioContext || !hasRecordRTC
}
const resizeCanvas = () => {
  // Keep the canvas backing store sized to its container's current width.
  const canvas = waveCanvas.value
  if (!canvas) return
  canvas.width = canvas.parentElement.clientWidth
  canvas.height = 46
}
const formatTime = (seconds) => {
  // Format a duration in seconds as "MM:SS" (zero-padded).
  // Guard against negative or non-finite input (e.g. clock skew in
  // updateRecordingTime), which previously produced strings like "-1:-5".
  const total = Number.isFinite(seconds) && seconds > 0 ? Math.floor(seconds) : 0
  const mins = Math.floor(total / 60).toString().padStart(2, '0')
  const secs = (total % 60).toString().padStart(2, '0')
  return `${mins}:${secs}`
}
const updateRecordingTime = () => {
  // Elapsed whole seconds since the recording started.
  const elapsedMs = Date.now() - recordingStartTime
  recordingTime.value = Math.floor(elapsedMs / 1000)
}
const recognizeSpeech = async (blob) => {
  // Mock speech-to-text: resolves with a random canned phrase after a
  // simulated network delay.
  // NOTE(review): `blob` is ignored — swap in a real STT backend later.
  const mockResponses = [
    "这是一个测试录音内容",
    "今天天气真好",
    "请记录我的会议笔记",
    "Vue3的录音功能实现",
    "语音识别技术展示"
  ]
  const chosen = mockResponses[Math.floor(Math.random() * mockResponses.length)]
  return new Promise((resolve) => {
    setTimeout(() => resolve(chosen), 1500)
  })
}
onMounted(() => {
  // Detect capability, wire up the waveform canvas, and restore history.
  checkBrowserSupport()
  if (waveCanvas.value) {
    ctx = waveCanvas.value.getContext('2d')
    resizeCanvas()
    window.addEventListener('resize', resizeCanvas)
  }
  // BUG FIX: JSON.parse was unguarded — corrupt localStorage data threw and
  // aborted the rest of mounting. Also validate that we got an array back.
  // NOTE(review): persisted `url` values are blob: URLs, which do not survive
  // a page reload, so restored items cannot play. Persist the audio bytes
  // (e.g. base64) instead if cross-session playback is intended — confirm.
  try {
    const savedRecordings = localStorage.getItem('audioRecordings')
    if (savedRecordings) {
      const parsed = JSON.parse(savedRecordings)
      if (Array.isArray(parsed)) {
        recordings.value = parsed
      }
    }
  } catch (e) {
    console.error('恢复录音历史失败:', e)
  }
})
onUnmounted(() => {
  // Tear down: release the mic/recorder and detach global listeners/timers.
  stopRecording()
  window.removeEventListener('resize', resizeCanvas)
  if (timeInterval) {
    clearInterval(timeInterval)
  }
})
const saveRecordingsToLocal = () => {
  // Persist recording metadata to localStorage.
  // `blob` is excluded from serialization: Blob instances are not
  // JSON-serializable (they previously stringified to "{}") and only waste
  // quota. Quota/availability errors are logged instead of throwing out of
  // the save path.
  try {
    const serializable = recordings.value.map(({ blob, ...rest }) => rest)
    localStorage.setItem('audioRecordings', JSON.stringify(serializable))
  } catch (e) {
    console.error('保存录音历史失败:', e)
  }
}
const toggleRecording = async () => {
  // Single entry point for the record button: flip between start and stop.
  const action = isRecording.value ? stopRecording : startRecording
  await action()
}
const startRecording = async () => {
  // Acquire the microphone, wire an AnalyserNode for the live waveform,
  // and start a mono 16 kHz WAV recording via RecordRTC.
  if (isUnsupported.value) {
    error.value = '当前浏览器不支持录音功能'
    return
  }
  try {
    error.value = null
    isProcessing.value = true
    recordingStartTime = Date.now()
    recordingTime.value = 0
    stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true,
        channelCount: 1
      }
    })
    audioContext = new (window.AudioContext || window.webkitAudioContext)()
    const source = audioContext.createMediaStreamSource(stream)
    analyser = audioContext.createAnalyser()
    analyser.fftSize = 256
    source.connect(analyser)
    dataArray = new Uint8Array(analyser.frequencyBinCount)
    // Discard any stale recorder from a previous (failed) session.
    if (recorder) {
      recorder.destroy()
      recorder = null
    }
    // NOTE: the previous timeSlice/ondataavailable pair was removed — the
    // handler was empty, so chunked delivery only added overhead.
    recorder = new RecordRTC(stream, {
      type: 'audio',
      mimeType: 'audio/wav',
      recorderType: RecordRTC.StereoAudioRecorder,
      numberOfAudioChannels: 1,
      sampleRate: 16000,
      desiredSampRate: 16000,
      disableLogs: true
    })
    recorder.startRecording()
    isRecording.value = true
    isProcessing.value = false
    timeInterval = setInterval(updateRecordingTime, 1000)
    drawWave()
  } catch (err) {
    console.error('录音启动失败:', err)
    error.value = `无法访问麦克风: ${err.message}`
    // BUG FIX: the old code called stopRecording() here, but stopRecording()
    // early-returns while isRecording is still false — so a startup failure
    // (e.g. permission granted, then AudioContext/RecordRTC throws) leaked
    // the mic stream and AudioContext, leaving the mic indicator on.
    // Clean up directly instead.
    if (stream) {
      stream.getTracks().forEach(track => track.stop())
      stream = null
    }
    if (audioContext && audioContext.state !== 'closed') {
      await audioContext.close().catch(e => console.error('关闭音频上下文失败', e))
    }
    audioContext = null
    if (recorder) {
      recorder.destroy()
      recorder = null
    }
    analyser = null
    dataArray = null
    if (timeInterval) {
      clearInterval(timeInterval)
      timeInterval = null
    }
    isRecording.value = false
    isProcessing.value = false
  }
}
// Stop the active recording, extract the WAV blob, kick off (mock) speech
// recognition, prepend the result to the history, and release all audio
// resources. No-op when not currently recording.
const stopRecording = async () => {
if (!isRecording.value) return
isProcessing.value = true
try {
// Stop the 1 s elapsed-time ticker and the waveform animation loop.
if (timeInterval) {
clearInterval(timeInterval)
timeInterval = null
}
if (animationFrameId) {
cancelAnimationFrame(animationFrameId)
animationFrameId = null
}
let blob = null
// Capture duration now; recordingTime stops updating once the interval is cleared.
let duration = recordingTime.value
if (recorder) {
// RecordRTC's stopRecording is callback-based; wrap it so the finally
// block below runs only after the blob has been extracted.
await new Promise((resolve) => {
recorder.stopRecording(() => {
try {
if (recorder) {
blob = recorder.getBlob()
console.log('录音完成,时长:', duration, '秒')
const url = URL.createObjectURL(blob)
// Transcription is async (mock, ~1.5 s); the history entry is added
// when it resolves — deliberately NOT awaited, so teardown proceeds.
recognizeSpeech(blob).then(text => {
const newRecording = {
url,
blob,
duration,
transcript: text,
time: new Date().toLocaleString()
}
// Newest first.
recordings.value.unshift(newRecording)
saveRecordingsToLocal()
emit('recording-saved', newRecording)
})
}
// Resolve in both branches so the await above can never hang.
resolve()
} catch (e) {
console.error('获取录音数据失败:', e)
error.value = '获取录音数据失败'
resolve()
}
})
})
}
} catch (err) {
console.error('录音停止错误:', err)
error.value = `录音停止时出错: ${err.message}`
} finally {
// Unconditional teardown: release the mic, close the audio graph, and
// reset UI flags even if blob extraction failed.
if (stream) {
stream.getTracks().forEach(track => track.stop())
stream = null
}
if (audioContext && audioContext.state !== 'closed') {
await audioContext.close().catch(err => console.error('关闭音频上下文失败', err))
}
if (recorder) {
recorder.destroy()
recorder = null
}
analyser = null
dataArray = null
isRecording.value = false
isProcessing.value = false
}
}
const playRecording = (index) => {
  // Play one history item through a detached <audio> element.
  // Guarded by isPlaying so only one playback runs at a time.
  if (isPlaying.value) return
  const recording = recordings.value[index]
  if (!recording) return
  isPlaying.value = true
  const audio = new Audio(recording.url)
  const release = () => {
    isPlaying.value = false
  }
  audio.onended = release
  // BUG FIX: on a failed load (e.g. a stale blob: URL restored from
  // localStorage) isPlaying previously stayed true forever, permanently
  // disabling every play button. Reset it on error and on play() rejection
  // (autoplay policy, decode failure) as well.
  audio.onerror = release
  audio.play().catch((err) => {
    console.error('播放失败:', err)
    error.value = '播放录音失败'
    release()
  })
}
const deleteRecording = (index) => {
  // Remove one history item, releasing its object URL before splicing.
  const list = recordings.value
  if (index < 0 || index >= list.length) return
  URL.revokeObjectURL(list[index].url)
  list.splice(index, 1)
  saveRecordingsToLocal()
}
// Render one frame of the live waveform (two sine-modulated traces plus a
// frequency bar row) and schedule the next frame while recording.
// Reads the current FFT data from `analyser` into `dataArray` each call.
const drawWave = () => {
// Bail out if the canvas/analyser are gone or recording has stopped;
// this also terminates the rAF loop naturally.
if (!ctx || !waveCanvas.value || !dataArray || !analyser || !isRecording.value) return
const { width, height } = waveCanvas.value
ctx.clearRect(0, 0, width, height)
// Shared purple gradient for the main trace and the bars.
const gradient = ctx.createLinearGradient(0, 0, width, 0)
gradient.addColorStop(0, '#6366f1')
gradient.addColorStop(0.5, '#a855f7')
gradient.addColorStop(1, '#8b5cf6')
analyser.getByteFrequencyData(dataArray)
// Average byte magnitude (0-255) drives overall wave amplitude;
// floor of 0.5 keeps the wave visible during near-silence.
const average = dataArray.reduce((acc, val) => acc + val, 0) / dataArray.length
const volumeMultiplier = Math.max(0.5, average / 128)
// Primary trace: two superimposed sines, phase-animated by wall-clock time.
ctx.strokeStyle = gradient
ctx.lineWidth = 3
ctx.beginPath()
const time = Date.now() * 0.05 / 1000
for (let x = 0; x < width; x++) {
// Map the x pixel onto a frequency bin; its magnitude scales amplitude.
const dataIndex = Math.floor((x / width) * dataArray.length)
const audioFactor = dataArray[dataIndex] / 255
const y = height / 2 +
Math.sin(x * 0.02 + time) * 20 * volumeMultiplier * (0.5 + audioFactor) +
Math.sin(x * 0.03 + time * 1.3) * 10 * volumeMultiplier * audioFactor
if (x === 0) {
ctx.moveTo(x, y)
} else {
ctx.lineTo(x, y)
}
}
ctx.stroke()
// Secondary trace: fainter, thinner, drifting the opposite direction.
ctx.strokeStyle = 'rgba(192, 132, 252, 0.4)'
ctx.lineWidth = 1.5
ctx.beginPath()
for (let x = 0; x < width; x++) {
const dataIndex = Math.floor((x / width) * dataArray.length)
const audioFactor = dataArray[dataIndex] / 255
const y = height / 2 +
Math.sin(x * 0.014 - time * 0.8) * 14 * volumeMultiplier * audioFactor
if (x === 0) {
ctx.moveTo(x, y)
} else {
ctx.lineTo(x, y)
}
}
ctx.stroke()
// Frequency bars: rounded-corner rectangles rising from the bottom edge.
const barWidth = 2
const barGap = 3
const barCount = Math.floor(width / (barWidth + barGap))
const barMaxHeight = height * 0.7
ctx.fillStyle = gradient
for (let i = 0; i < barCount; i++) {
const dataIndex = Math.floor((i / barCount) * dataArray.length)
const barHeight = (dataArray[dataIndex] / 255) * barMaxHeight
if (barHeight > 0) {
const x = i * (barWidth + barGap)
const y = height - barHeight
const radius = barWidth / 2
// Hand-traced rounded rectangle (roundRect is not assumed available).
ctx.beginPath()
ctx.moveTo(x + radius, y)
ctx.lineTo(x + barWidth - radius, y)
ctx.quadraticCurveTo(x + barWidth, y, x + barWidth, y + radius)
ctx.lineTo(x + barWidth, height - radius)
ctx.quadraticCurveTo(x + barWidth, height, x + barWidth - radius, height)
ctx.lineTo(x + radius, height)
ctx.quadraticCurveTo(x, height, x, height - radius)
ctx.lineTo(x, y + radius)
ctx.quadraticCurveTo(x, y, x + radius, y)
ctx.closePath()
ctx.fill()
}
}
// Keep animating until isRecording flips false (or rAF is cancelled
// explicitly in stopRecording).
if (isRecording.value) {
animationFrameId = requestAnimationFrame(drawWave)
}
}
</script>
<style scoped>
/* Card-style wrapper for the whole recorder. */
.audio-recorder-container {
width: 100%;
max-width: 800px;
margin: 0 auto;
padding: 20px;
border-radius: 8px;
background: #fff;
box-shadow: 0 2px 12px 0 rgba(0, 0, 0, 0.1);
}
/* Record button + status row. */
.recorder-controls {
display: flex;
align-items: center;
gap: 20px;
margin-bottom: 20px;
}
.recording-info {
display: flex;
align-items: center;
gap: 15px;
color: #666;
}
.recording-indicator {
display: flex;
align-items: center;
gap: 5px;
color: #f56c6c;
}
/* Red blinking dot shown while recording. */
.pulse {
display: inline-block;
width: 10px;
height: 10px;
border-radius: 50%;
background: #f56c6c;
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0% {
transform: scale(0.95);
opacity: 0.7;
}
50% {
transform: scale(1.1);
opacity: 1;
}
100% {
transform: scale(0.95);
opacity: 0.7;
}
}
/* Waveform canvas container; .active is toggled while recording. */
.wave-display {
margin: 20px 0;
border-radius: 4px;
overflow: hidden;
transition: all 0.3s;
opacity: 0.7;
background: #f5f7fa;
}
.wave-display.active {
opacity: 1;
box-shadow: 0 0 10px rgba(64, 158, 255, 0.3);
}
/* NOTE(review): .recording-content and .transcript-text have no matching
   elements in the current template — likely leftovers from an earlier layout. */
.recording-content {
margin: 20px 0;
padding: 15px;
border-radius: 4px;
background: #f5f7fa;
}
.recording-content h3 {
margin-top: 0;
margin-bottom: 10px;
color: #409EFF;
}
.transcript-text {
padding: 10px;
background: white;
border-radius: 4px;
border-left: 3px solid #409EFF;
}
/* Recording history list and its items. */
.recordings-list {
margin-top: 30px;
}
.recordings-list h3 {
margin-bottom: 15px;
padding-bottom: 10px;
border-bottom: 1px solid #ebeef5;
color: #409EFF;
}
.recording-item {
margin-bottom: 15px;
padding: 15px;
border-radius: 4px;
background: #f5f7fa;
transition: all 0.3s;
}
.recording-item:hover {
background: #ebf0f5;
}
.item-header {
display: flex;
align-items: center;
gap: 15px;
margin-bottom: 10px;
font-size: 14px;
color: #666;
}
.item-index {
font-weight: bold;
color: #409EFF;
}
.item-content {
display: flex;
flex-direction: column;
gap: 10px;
}
.item-transcript {
padding: 10px;
background: white;
border-radius: 4px;
font-size: 14px;
}
.item-actions {
display: flex;
justify-content: flex-end;
gap: 5px;
}
audio {
width: 100%;
margin-top: 5px;
}
/* Alert containers (unsupported browser / runtime error). */
.browser-warning,
.error-message {
margin: 20px 0;
}
.el-button {
min-width: 120px;
}
</style>