Android AWS KVS WebRTC 通话声道切换到媒体音乐声道

201 阅读3分钟

在基于 AWS KVS 开发双向音视频直播流的过程中碰到一个问题:APP 端不需要发送手机端的音视频,只需要接收设备端的音视频,但是在 APP 中播放远端音频(Remote Audio Track)时,音频一直占用通话声道,而不是媒体(音乐)声道。那么如何解决这个问题?

有两种方式:第一种是修改 WebRTC 源码;第二种是不使用默认配置,而是自定义创建 JavaAudioDeviceModule。这里主要讲第二种方式。

一、创建自定义JavaAudioDeviceModule

主要是修改audioAttributes, 代码如下:

import android.content.Context
import android.media.*
import android.util.Log
import org.webrtc.audio.JavaAudioDeviceModule

/**
 * Routes WebRTC audio playback onto the media stream (STREAM_MUSIC) instead of
 * the default voice-call stream (STREAM_VOICE_CALL).
 */
class MediaAudioManager(private val context: Context) {

    /**
     * Creates a [JavaAudioDeviceModule] whose playback is forced onto the media stream.
     *
     * The key piece is the [AudioAttributes] (USAGE_MEDIA + CONTENT_TYPE_MUSIC);
     * the error callbacks only log, matching the default module's behavior.
     */
    fun createJavaAudioDeviceModule(): JavaAudioDeviceModule {
        // USAGE_MEDIA + CONTENT_TYPE_MUSIC is what moves playback off the call stream.
        val mediaPlaybackAttributes = AudioAttributes.Builder()
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
            .build()

        return JavaAudioDeviceModule.builder(context)
            .setAudioAttributes(mediaPlaybackAttributes)
            .setUseHardwareAcousticEchoCanceler(true)
            .setUseHardwareNoiseSuppressor(true)
            .setAudioRecordErrorCallback(object : JavaAudioDeviceModule.AudioRecordErrorCallback {
                override fun onWebRtcAudioRecordInitError(errorMessage: String?) {
                    Log.e("MediaAudioManager", "onWebRtcAudioRecordInitError: $errorMessage")
                }

                override fun onWebRtcAudioRecordStartError(
                    errorCode: JavaAudioDeviceModule.AudioRecordStartErrorCode?,
                    errorMessage: String?
                ) {
                    Log.e("MediaAudioManager", "onWebRtcAudioRecordStartError: $errorMessage")
                }

                override fun onWebRtcAudioRecordError(errorMessage: String?) {
                    Log.e("MediaAudioManager", "onWebRtcAudioRecordError: $errorMessage")
                }
            })
            .setAudioTrackErrorCallback(object : JavaAudioDeviceModule.AudioTrackErrorCallback {
                override fun onWebRtcAudioTrackInitError(errorMessage: String?) {
                    Log.e("MediaAudioManager", "onWebRtcAudioTrackInitError: $errorMessage")
                }

                override fun onWebRtcAudioTrackStartError(
                    errorCode: JavaAudioDeviceModule.AudioTrackStartErrorCode?,
                    errorMessage: String?
                ) {
                    Log.e("MediaAudioManager", "onWebRtcAudioTrackStartError: $errorMessage")
                }

                override fun onWebRtcAudioTrackError(errorMessage: String?) {
                    Log.e("MediaAudioManager", "onWebRtcAudioTrackError: $errorMessage")
                }
            })
            .createAudioDeviceModule()
    }
}

在构建PeerConnectionFactory时使用:

PeerConnectionFactory.builder().apply {
    // Use the custom audio device module so remote audio plays on the
    // media (music) stream instead of occupying the voice-call stream.
    val mediaAudioManager = MediaAudioManager(mContext)
    val audioModule = mediaAudioManager.createJavaAudioDeviceModule()
    setAudioDeviceModule(audioModule)
    
    ...其他配置略...
    
  }.createPeerConnectionFactory()

二、声音通道的控制以及占用的问题

一般情况下都会有一个按钮控制静音,静音或开启的控制逻辑需要区分用户是否佩戴耳机,佩戴耳机的情况下不能开启扬声器避免暴露隐私,具体代码如下:

/**
 * Turns remote audio playback on or off.
 *
 * When enabling: takes audio focus (pausing other media apps), switches the
 * audio mode to normal, and only opens the loudspeaker if no headphones are
 * connected (privacy). When disabling: releases focus and restores the audio
 * mode and speakerphone state that were saved earlier.
 */
fun enableAudio(enable: Boolean) {
    when {
        enable -> {
            // Grab the audio focus first so other media apps pause.
            requestAudioFocus()
            audioManager?.mode = AudioManager.MODE_NORMAL
            // Never route to the loudspeaker while headphones are in use.
            val headphonesInUse = isHeadphonesPlugged()
            setSpeakerphoneOn(!headphonesInUse)
        }
        else -> {
            // Hand the focus back and restore the saved audio state.
            abandonAudioFocus()
            val savedMode = originalAudioMode
            if (savedMode != null) audioManager?.mode = savedMode
            val savedSpeaker = originalSpeakerphoneOn
            if (savedSpeaker != null) audioManager?.isSpeakerphoneOn = savedSpeaker
        }
    }
    try {
        remoteAudioTrack?.setEnabled(enable)
    } catch (ex: Exception) {
        // Best effort: the remote track may already have been disposed.
        Log.d(TAG,"enable Audio but remote Audio Track has been disposed")
    }
}

检测是否佩戴有线或蓝牙耳机:

/**
 * Returns true when a wired or Bluetooth (A2DP) headphone output is present,
 * so audio is not played through the loudspeaker while one is connected.
 *
 * NOTE(review): TYPE_USB_HEADSET and TYPE_BLUETOOTH_SCO are not checked here —
 * confirm whether those devices need to be treated as headphones too.
 */
private fun isHeadphonesPlugged(): Boolean {
    val outputs = audioManager?.getDevices(AudioManager.GET_DEVICES_OUTPUTS) ?: return false
    return outputs.any { device ->
        when (device.type) {
            AudioDeviceInfo.TYPE_WIRED_HEADPHONES,
            AudioDeviceInfo.TYPE_WIRED_HEADSET,
            AudioDeviceInfo.TYPE_BLUETOOTH_A2DP -> true
            else -> false
        }
    }
}

打开手机扬声器的代码:

/**
 * Opens or closes the phone's loudspeaker.
 *
 * On Android 12+ this uses the communication-device API; on older versions it
 * falls back to the legacy mode/isSpeakerphoneOn flags.
 */
@SuppressLint("NewApi")
private fun setSpeakerphoneOn(enabled: Boolean) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
        // Android 12+: pick the built-in speaker when enabling; when disabling
        // (or when no speaker device is found) clear back to the default route.
        val speaker = if (enabled) {
            audioManager?.availableCommunicationDevices
                ?.firstOrNull { it.type == AudioDeviceInfo.TYPE_BUILTIN_SPEAKER }
        } else {
            null
        }
        if (speaker != null) {
            audioManager?.setCommunicationDevice(speaker)
        } else {
            audioManager?.clearCommunicationDevice()
        }
    } else {
        // Legacy path (Android 11 and below).
        audioManager?.mode = if (enabled) AudioManager.MODE_NORMAL else AudioManager.MODE_IN_COMMUNICATION
        audioManager?.isSpeakerphoneOn = enabled
    }
}

下面是请求并占用声道焦点、以及释放声道占用的代码。注意:断开 WebRTC 连接的地方也要释放声道占用。

private var focusRequest: AudioFocusRequest? = null   // active audio-focus request; kept so it can be released later

/**
 * Requests transient audio focus so WebRTC audio can play — e.g. if the user
 * is listening to music, the music app is asked to pause while we play.
 */
private fun requestAudioFocus() {
    val focusChangeListener = AudioManager.OnAudioFocusChangeListener { change ->
        // Map the focus-change code to a log label; unknown codes are ignored.
        val label = when (change) {
            AudioManager.AUDIOFOCUS_GAIN -> "AUDIOFOCUS_GAIN"                       // focus gained, safe to play
            AudioManager.AUDIOFOCUS_LOSS -> "AUDIOFOCUS_LOSS"                       // lost permanently (another app took over)
            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT -> "AUDIOFOCUS_LOSS_TRANSIENT"   // lost briefly, should pause
            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK -> "AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK" // could duck, not recommended for WebRTC
            else -> null
        }
        label?.let { Log.d(TAG, it) }
    }

    // AudioFocusRequest.Builder is the Android 8.0+ (API 26) focus API.
    focusRequest = AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN_TRANSIENT)
        .setOnAudioFocusChangeListener(focusChangeListener)
        .setAudioAttributes(
            AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                .build()
        )
        .build()

    val granted = audioManager?.requestAudioFocus(focusRequest!!) == AudioManager.AUDIOFOCUS_REQUEST_GRANTED
    if (granted) {
        Log.d(TAG,"Audio focus granted. Other apps will pause.")
    } else {
        Log.d(TAG,"Audio focus request failed.")
    }
}
 
 /**
  * Releases the audio focus acquired by [requestAudioFocus].
  *
  * Must also be called wherever the WebRTC connection is torn down, otherwise
  * the focus stays held and other apps cannot resume playback.
  */
 private fun abandonAudioFocus() {
     focusRequest?.let {
         audioManager?.abandonAudioFocusRequest(it)
         // Clear the stale request so a repeated call is a no-op instead of
         // abandoning an already-released request.
         focusRequest = null
         // Use the same Log.d(TAG, ...) logging as the rest of this code
         // (the original called an unresolvable FoxxLog here).
         Log.d(TAG,"Audio focus released.")
     }
 }

部分类或对象在本文中没有给出定义,需要参考 AWS KVS 的官方 Demo;做过这类需求的开发者应该能看明白。