基于Android 9.0
1.概述
在《Android 音频子系统--08:AudioTrack创建》章节中,已经介绍了AudioTrack创建过程。在AudioFlinger::createTrack()方法中有一个重要的过程,就是通过属性获取output。
一个播放线程(PlaybackThread)对应一个output,一个output对应多个devices。最终的音频数据是需要传输到指定的devices上。所以在创建AudioTrack时,需要根据属性去查找合适播放的device。并返回一个output句柄,供AudioFlinger操作。
2.dumpsys输出设备
2.1 dumpsys命令
Android中提供了查找音频设备的命令,命令如下:
dumpsys media.audio_policy
2.2 dumpsys实现原理
下面先看看这个dumpsys命令是怎么实现的。
dumpsys的源码结构其实很简单,只有一个dumpsys.cpp
// frameworks/native/cmds/dumpsys/dumpsys.cpp
// Entry point of the dumpsys command: optionally lists the services
// registered with ServiceManager, then dumps each requested service by
// invoking its Binder dump() on a dedicated worker thread.
int Dumpsys::main(int argc, char* const argv[]) {
// ...
const size_t N = services.size();
if (N > 1) {
// first print a list of the current services
// Print every service currently registered with ServiceManager.
aout << "Currently running services:" << endl;
for (size_t i=0; i<N; i++) {
sp<IBinder> service = sm_->checkService(services[i]);
if (service != nullptr) {
bool skipped = IsSkipped(skippedServices, services[i]);
aout << " " << services[i] << (skipped ? " (skipped)" : "") << endl;
}
}
}
if (showListOnly) {
return 0;
}
// Iterate over all requested services.
for (size_t i = 0; i < N; i++) {
// Service name as registered with ServiceManager, e.g. "media.audio_policy".
const String16& serviceName = services[i];
if (IsSkipped(skippedServices, serviceName)) continue;
// Spawn the worker thread that performs the actual dump.
if (startDumpThread(serviceName, args) == OK) {
// ...
bool dumpComplete = (status == OK);
stopDumpThread(dumpComplete);
}
}
return 0;
}
继续看一下startDumpThread方法:
// frameworks/native/cmds/dumpsys/dumpsys.cpp
// Starts a worker thread that dumps a single service into a pipe.
// serviceName: name registered with ServiceManager (e.g. "media.audio_policy").
// args: extra arguments forwarded to the service's dump() implementation.
// Returns OK on success, NAME_NOT_FOUND if the service is not registered,
// or -errno if the pipe could not be created.
status_t Dumpsys::startDumpThread(const String16& serviceName, const Vector<String16>& args) {
// First check that a service with this name is actually registered.
sp<IBinder> service = sm_->checkService(serviceName);
if (service == nullptr) {
aerr << "Can't find service: " << serviceName << endl;
return NAME_NOT_FOUND;
}
int sfd[2];
if (pipe(sfd) != 0) {
aerr << "Failed to create pipe to dump service info for " << serviceName << ": "
<< strerror(errno) << endl;
return -errno;
}
// Read end stays with the caller (redirectFd_); the write end is moved
// into the worker thread below.
redirectFd_ = unique_fd(sfd[0]);
unique_fd remote_end(sfd[1]);
sfd[0] = sfd[1] = -1;
// dump blocks until completion, so spawn a thread..
activeThread_ = std::thread([=, remote_end{std::move(remote_end)}]() mutable {
// Invoke the remote service's dump() via Binder, writing into the pipe.
int err = service->dump(remote_end.get(), args);
// It'd be nice to be able to close the remote end of the socketpair before the dump
// call returns, to terminate our reads if the other end closes their copy of the
// file descriptor, but then hangs for some reason. There doesn't seem to be a good
// way to do this, though.
remote_end.reset();
if (err != 0) {
aerr << "Error dumping service info: (" << strerror(err) << ") "
<< serviceName << endl;
}
});
return OK;
}
AudioPolicyService注册到ServiceManager中设置的名称为media.audio_policy(AudioPolicyService.h),执行dumpsys命令时通过函数checkService来找到具体的service,并执行该service的dump方法,达到dump service的目的。下面是实现的dump方法:
// Writes a human-readable snapshot of the audio policy state to fd.
// This is what "dumpsys media.audio_policy" ultimately prints: global state
// (primary output handle, phone state, force-use settings, TTS/master-mono
// flags) followed by the dumps of the device, module, output, input, volume
// curve, effect, patch and policy-mix collections.
status_t AudioPolicyManager::dump(int fd)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
result.append(buffer);
snprintf(buffer, SIZE, " Primary Output: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
result.append(buffer);
std::string stateLiteral;
AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
result.append(buffer);
snprintf(buffer, SIZE, " Force use for communications %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
result.append(buffer);
snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
result.append(buffer);
snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
result.append(buffer);
// Flush the accumulated text, then let each collection append its own dump.
write(fd, result.string(), result.size());
mAvailableOutputDevices.dump(fd, String8("Available output"));
mAvailableInputDevices.dump(fd, String8("Available input"));
mHwModulesAll.dump(fd);
mOutputs.dump(fd);
mInputs.dump(fd);
mVolumeCurves->dump(fd);
mEffects.dump(fd);
mAudioPatches.dump(fd);
mPolicyMixes.dump(fd);
return NO_ERROR;
}
2.3 dumpsys结果
Earpiece(听筒),Speaker(外放),Telephony Tx(用于传输路由到手机无线装置的音频)
AudioPolicyManager Dump: 0xae4b7200
Primary Output: 13
Phone state: AUDIO_MODE_NORMAL
Force use for communications 0
Force use for media 10
Force use for record 0
Force use for dock 8
Force use for system 0
Force use for hdmi system audio 0
Force use for encoded surround output 0
TTS output not available
Master mono: off
- Available output devices:
Device 1:
- id: 1
- tag name: Earpiece
- type: AUDIO_DEVICE_OUT_EARPIECE
- Profiles:
Profile 0:
- format: AUDIO_FORMAT_PCM_16_BIT
- sampling rates:48000
- channel masks:0x0010
Device 2:
- id: 2
- tag name: Speaker
- type: AUDIO_DEVICE_OUT_SPEAKER
- Profiles:
Profile 0:
- format: AUDIO_FORMAT_PCM_16_BIT
- sampling rates:48000
- channel masks:0x0003
Device 3:
- id: 6
- tag name: Telephony Tx
- type: AUDIO_DEVICE_OUT_TELEPHONY_TX
- Profiles:
Profile 0:
- format: AUDIO_FORMAT_PCM_16_BIT
- sampling rates:8000, 16000
- channel masks:0x0001, 0x0003
2.4 对应配置信息
AudioPolicyManager中解析配置文件,配置文件位置在/vendor/etc/audio_policy_configuration.xml(终端设备)。
执行解析过程:
mVolumeCurves = new VolumeCurvesCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices,
mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
解析结果显示:
<modules>
<!-- Primary Audio HAL -->
<module name="primary" halVersion="2.0">
<attachedDevices>
<item>Earpiece</item>
<item>Speaker</item>
<item>Telephony Tx</item>
<item>Built-In Mic</item>
<item>Built-In Back Mic</item>
<item>FM Tuner</item>
<item>Telephony Rx</item>
</attachedDevices>
3.选择设备类型
上面已经dumpsys出了具体的设备信息,在使用AudioTrack时会根据属性去选择相应的设备进行播放。输出设备类型的选择(例如Speaker,耳机(usb接口,蓝牙,或者primary)),无论是哪种类型的音频播放(音乐,铃声,电话等),最终都会在Native层创建一个AudioTrack实例。这里以Music为例来说明:
AudioTrack::AudioTrack()
---> AudioTrack::set()
--------> AudioTrack::createTrack_l()
就从createTrack_l()方法开始分析:
// frameworks/av/media/libaudioclient/AudioTrack.cpp
// Creates the server-side Track object in AudioFlinger for this AudioTrack.
// As part of AudioFlinger::createTrack() the policy manager resolves a
// suitable output (and therefore a playback thread / device) for the stream.
status_t AudioTrack::createTrack_l()
{
status_t status;
bool callbackAdded = false;
// Obtain the Binder proxy of the AudioFlinger service.
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
status = NO_INIT;
goto exit;
}
// ...
// Fill in the request parameters (attributes, stream config, client info).
IAudioFlinger::CreateTrackInput input;
if (mStreamType != AUDIO_STREAM_DEFAULT) {
stream_type_to_audio_attributes(mStreamType, &input.attr);
} else {
input.attr = mAttributes;
}
input.config = AUDIO_CONFIG_INITIALIZER;
input.config.sample_rate = mSampleRate;
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
input.config.offload_info = mOffloadInfoCopy;
input.clientInfo.clientUid = mClientUid;
input.clientInfo.clientPid = mClientPid;
input.clientInfo.clientTid = -1;
IAudioFlinger::CreateTrackOutput output;
// Call AudioFlinger::createTrack(); after device selection it hands back
// an output in 'output'.
sp<IAudioTrack> track = audioFlinger->createTrack(input,
output,
&status);
// ...
// (some code omitted)
// ...
}
3.1 getOutputForAttr()获取output输出设备
继续看到audioFlinger->createTrack(),代码如下:
// Server-side track creation. The key routing step is the call to
// AudioSystem::getOutputForAttr(), which resolves the audio attributes to an
// output handle (and selected device) before the Track is constructed.
sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
CreateTrackOutput& output,
status_t *status)
{
// ......
// Resolve the output from the audio attributes.
lStatus = AudioSystem::getOutputForAttr(&input.attr, &output.outputId, sessionId, &streamType,
clientPid, clientUid, &input.config, input.flags,
&output.selectedDeviceId, &portId);
// ......
}
这里会去调用AudioSystem的getOutputForAttr(),代码如下:
// Thin client-side wrapper: forwards the request to the AudioPolicyService
// proxy, which delegates to AudioPolicyManager::getOutputForAttr().
// Returns NO_INIT if the policy service is unavailable.
status_t AudioSystem::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
return aps->getOutputForAttr(attr, output, session, stream, pid, uid,
config,
flags, selectedDeviceId, portId);
}
这里最终会调用到AudioPolicyManager中:
// Resolves audio attributes (or, when absent, a stream type) to an output
// handle and a selected device id. Core pipeline:
// attributes -> routing strategy -> device -> output.
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId)
{
audio_attributes_t attributes;
// In the music playback path described here attr is NULL, so the
// stream-type branch below derives the attributes instead.
if (attr != NULL) {
if (!isValidAttributes(attr)) {
ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
attr->usage, attr->content_type, attr->flags,
attr->tags);
return BAD_VALUE;
}
attributes = *attr;
} else {
if (*stream < AUDIO_STREAM_MIN || *stream >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("getOutputForAttr(): invalid stream type");
return BAD_VALUE;
}
/* For the music use case the attributes are filled in as follows
(see AudioPolicyHelper.h):
attr->content_type = AUDIO_CONTENT_TYPE_MUSIC;
attr->usage = AUDIO_USAGE_MEDIA;
*/
stream_type_to_audio_attributes(*stream, &attributes);
}
// ......
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
// Key step 1: derive the routing strategy for this track from its attributes.
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
// Key step 2: pick the output device for that strategy.
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
// Key step 3: pick the output path based on device, output flags, format, etc.
*output = getOutputForDevice(device, session, *stream, config, flags);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
}
DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
*selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
ALOGV(" getOutputForAttr() returns output %d selectedDeviceId %d", *output, *selectedDeviceId);
return NO_ERROR;
}
上述代码中有三个重要的方法,getStrategyForAttr(),getDeviceForStrategy()和getOutputForDevice(),下面将逐一分析。
3.2 getStrategyForAttr()获取音频策略
根据提供的音频属性获取到音频策略,主要代码如下:
// AudioPolicyManager.cpp
// Maps audio attributes to a routing strategy. Special flags (beacon,
// enforced audibility) take precedence; otherwise the usage decides.
uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
// flags to strategy mapping
if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
return (uint32_t) STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
}
if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
}
// usage to strategy mapping
// Music uses AUDIO_USAGE_MEDIA, so the engine returns STRATEGY_MEDIA here.
return static_cast<uint32_t>(mEngine->getStrategyForUsage(attr->usage));
}
继续看看mEngine的getStrategyForUsage():
// frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp
// Usage-to-strategy mapping table. Note that AUDIO_USAGE_MEDIA (the music
// case) and any unknown usage both resolve to STRATEGY_MEDIA.
routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
{
// usage to strategy mapping
switch (usage) {
case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
return STRATEGY_ACCESSIBILITY;
case AUDIO_USAGE_MEDIA:
case AUDIO_USAGE_GAME:
case AUDIO_USAGE_ASSISTANT:
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
return STRATEGY_MEDIA;
case AUDIO_USAGE_VOICE_COMMUNICATION:
return STRATEGY_PHONE;
case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
return STRATEGY_DTMF;
case AUDIO_USAGE_ALARM:
case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
return STRATEGY_SONIFICATION;
case AUDIO_USAGE_NOTIFICATION:
case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
case AUDIO_USAGE_NOTIFICATION_EVENT:
return STRATEGY_SONIFICATION_RESPECTFUL;
case AUDIO_USAGE_UNKNOWN:
default:
return STRATEGY_MEDIA;
}
}
3.3 getDeviceForStrategy()选取输出设备
输入getStrategyForAttr()获取到的策略STRATEGY_MEDIA,然后再去获取对应的输出设备。
// AudioPolicyManager.cpp
// Returns the output device (bitmask) for a routing strategy. Explicit
// per-session routes take priority; a cached result may be returned when
// fromCache is true; otherwise the engine computes the device.
audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
bool fromCache)
{
// Check if an explicit routing request exists for a stream type corresponding to the
// specified strategy and use it in priority over default routing rules.
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
if (getStrategy((audio_stream_type_t)stream) == strategy) {
audio_devices_t forcedDevice =
mOutputRoutes.getActiveDeviceForStream(
(audio_stream_type_t)stream, mAvailableOutputDevices);
if (forcedDevice != AUDIO_DEVICE_NONE) {
return forcedDevice;
}
}
}
if (fromCache) {
ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
strategy, mDeviceForStrategy[strategy]);
return mDeviceForStrategy[strategy];
}
// For the music case the strategy passed down here is STRATEGY_MEDIA.
return mEngine->getDeviceForStrategy(strategy);
}
继续看看mEngine的getDeviceForStrategy():
// Collects the currently available input/output devices and open outputs
// from the policy-manager observer, then delegates the actual selection to
// getDeviceForStrategyInt().
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
{
DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
return getDeviceForStrategyInt(strategy, availableOutputDevices,
availableInputDevices, outputs, (uint32_t)AUDIO_DEVICE_NONE);
}
这里输入所有的输入和输出设备,根据策略strategy去选择合适的device:
// Selects the output device bitmask for a strategy, given the available
// devices and the open outputs. Only the STRATEGY_MEDIA branch is shown in
// this excerpt; if nothing matches, the default output device is returned.
audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
DeviceVector availableOutputDevices,
DeviceVector availableInputDevices,
const SwAudioOutputCollection &outputs,
uint32_t outputDeviceTypesToIgnore) const
{
uint32_t device = AUDIO_DEVICE_NONE;
uint32_t availableOutputDevicesType =
availableOutputDevices.types() & ~outputDeviceTypesToIgnore;
switch (strategy) {
// ...
// (other strategies omitted)
// ...
case STRATEGY_REROUTING:
// strategy used for music playback
case STRATEGY_MEDIA: {
uint32_t device2 = AUDIO_DEVICE_NONE;
if (strategy != STRATEGY_SONIFICATION) {
// no sonification on remote submix (e.g. WFD)
if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
String8("0")) != 0) {
device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
}
}
// While in a call, media is routed with the phone strategy instead.
if (isInCall() && (strategy == STRATEGY_MEDIA)) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
break;
}
// Prefer AUDIO_DEVICE_OUT_HEARING_AID first.
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
}
// Then prefer Bluetooth A2DP devices, unless A2DP use is forced off.
if ((device2 == AUDIO_DEVICE_NONE) &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
}
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
}
}
// ... (other candidates omitted); the speaker is the final fallback.
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
}
int device3 = AUDIO_DEVICE_NONE;
if (strategy == STRATEGY_MEDIA) {
// ARC, SPDIF and AUX_LINE can co-exist with others.
device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
}
device2 |= device3;
// device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
// STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
device |= device2;
// If hdmi system audio mode is on, remove speaker out of output list.
if ((strategy == STRATEGY_MEDIA) &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
device &= ~AUDIO_DEVICE_OUT_SPEAKER;
}
// for STRATEGY_SONIFICATION:
// if SPEAKER was selected, and SPEAKER_SAFE is available, use SPEAKER_SAFE instead
if ((strategy == STRATEGY_SONIFICATION) &&
(device & AUDIO_DEVICE_OUT_SPEAKER) &&
(availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
device &= ~AUDIO_DEVICE_OUT_SPEAKER;
}
} break;
default:
ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
break;
}
if (device == AUDIO_DEVICE_NONE) {
ALOGV("getDeviceForStrategy() no device found for strategy %d", strategy);
device = mApmObserver->getDefaultOutputDevice()->type();
ALOGE_IF(device == AUDIO_DEVICE_NONE,
"getDeviceForStrategy() no default device defined");
}
ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
return device;
}
在/system/media/audio/include/system/audio-base.h中定义,它是对audio设备的定义:
// Audio device type bitmask values (excerpt), as defined in
// /system/media/audio/include/system/audio-base.h.
enum {
AUDIO_DEVICE_NONE = 0u, // 0x0
AUDIO_DEVICE_BIT_IN = 2147483648u, // 0x80000000
AUDIO_DEVICE_BIT_DEFAULT = 1073741824u, // 0x40000000
AUDIO_DEVICE_OUT_EARPIECE = 1u, // 0x1 // earpiece
AUDIO_DEVICE_OUT_SPEAKER = 2u, // 0x2 // loudspeaker
AUDIO_DEVICE_OUT_WIRED_HEADSET = 4u, // 0x4 // wired headset with inline controls (play/pause, volume, ...)
AUDIO_DEVICE_OUT_WIRED_HEADPHONE = 8u, // 0x8 // plain headphones, listen-only (no controls)
AUDIO_DEVICE_OUT_BLUETOOTH_SCO = 16u, // 0x10 // Bluetooth SCO link (mono voice path)
AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 32u, // 0x20 // Bluetooth SCO headset
AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 64u, // 0x40 // Bluetooth SCO car kit (hands-free)
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP = 128u, // 0x80 // Bluetooth A2DP (stereo music)
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 256u, // 0x100
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 512u, // 0x200
...
3.4 getOutputForDevice()获取output
上面的getDeviceForStrategy()已经获取到了device,然后再去获取对应的output。这个函数是根据我们下发的device,format,以及请求的audio policy从系统保存的众多音频路径中选择符合要求的路径。
*output = getOutputForDevice(device, session, *stream, config, flags);
// AudioPolicyManager.cpp
// Given the selected device, picks the actual output path (io handle):
// first adjusts the policy flags, then filters the open outputs that
// support the device and lets selectOutput() choose among them.
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
const audio_config_t *config,
audio_output_flags_t *flags)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
// open a direct output if required by specified parameters
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
// this should normally be set appropriately in the policy configuration file
// Adjust the flags according to the request type.
if ((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
*flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
// The music stream takes the branch below.
} else if (/* stream == AUDIO_STREAM_MUSIC && */
*flags == AUDIO_OUTPUT_FLAG_NONE &&
property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
// use DEEP_BUFFER as default output for music stream type
*flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
}
if (stream == AUDIO_STREAM_TTS) {
*flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
audio_is_linear_pcm(config->format)) {
*flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
}
// ...
// (some code omitted)
// ...
// open a non direct output
// for non direct outputs, only PCM is supported
// By the time it reaches AudioTrack this is normally a linear PCM stream.
if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
// From the saved mOutputs, collect the output paths that include the
// requested device. Note this is still a list: more than one path may
// be able to reach the device.
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
// Strip DIRECT from the requested policy flags.
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
// Then pick the best matching path from the candidates, based on the
// audio format and the policy flags.
output = selectOutput(outputs, *flags, config->format);
}
ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
"sampling rate %d, format %#x, channels %#x, flags %#x",
stream, config->sample_rate, config->format, config->channel_mask, *flags);
return output;
}
先来看看getOutputsForDevice()方法:
// Returns the handles of all open outputs whose supported-device mask fully
// contains the requested device mask (i.e. every requested device bit is
// reachable through that output).
SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(
audio_devices_t device,
const SwAudioOutputCollection& openOutputs)
{
SortedVector<audio_io_handle_t> outputs;
ALOGVV("getOutputsForDevice() device %04x", device);
for (size_t i = 0; i < openOutputs.size(); i++) {
ALOGVV("output %zu isDuplicated=%d device=%04x",
i, openOutputs.valueAt(i)->isDuplicated(),
openOutputs.valueAt(i)->supportedDevices());
// Keep this output only if it supports ALL bits of 'device'.
if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) {
ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i));
outputs.add(openOutputs.keyAt(i));
}
}
return outputs;
}
其实就是筛选出支持对应device的outputs。下面再看看怎样从这些outputs里面选择最合适的一个。
/*
 * Parameters:
 * outputs: output paths that can reach the target device
 * flags: policy flags requested by the application
 * format: audio format requested by the application
 */
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags,
audio_format_t format)
{
// select one output among several that provide a path to a particular device or set of
// devices (the list was previously build by getOutputsForDevice()).
// The priority is as follows:
// 1: the output with the highest number of requested policy flags
// 2: the output with the bit depth the closest to the requested one
// 3: the primary output
// 4: the first output in the list
// No candidate path at all: nothing to offer, return 0.
if (outputs.size() == 0) {
return AUDIO_IO_HANDLE_NONE;
}
// Only one candidate: no choice to make, return it directly.
if (outputs.size() == 1) {
return outputs[0];
}
int maxCommonFlags = 0;
audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
for (audio_io_handle_t output : outputs) {
// Examine each candidate output path in turn.
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (!outputDesc->isDuplicated()) {
// if a valid format is specified, skip output if not compatible
// For DIRECT (unmixed, e.g. HDMI) outputs the format must match
// exactly; otherwise the requested format must be linear PCM, or
// the path is discarded.
if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (format != outputDesc->mFormat) {
continue;
}
} else if (!audio_is_linear_pcm(format)) {
continue;
}
// Track the candidate whose format best matches the request.
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormat, format)) {
outputForFormat = output;
bestFormat = outputDesc->mFormat;
}
}
// Track the candidate sharing the largest number of requested policy flags.
int commonFlags = popcount(outputDesc->mProfile->getFlags() & flags);
if (commonFlags >= maxCommonFlags) {
if (commonFlags == maxCommonFlags) {
if (format != AUDIO_FORMAT_INVALID
&& AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormatForFlags, format)) {
outputForFlags = output;
bestFormatForFlags = outputDesc->mFormat;
}
} else {
outputForFlags = output;
maxCommonFlags = commonFlags;
bestFormatForFlags = outputDesc->mFormat;
}
ALOGV("selectOutput() commonFlags for output %d, %04x", output, commonFlags);
}
// Remember the primary output if it is among the candidates.
if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
outputForPrimary = output;
}
}
}
// Final priority order: best flag match, then best format match, then the
// primary output, then simply the first candidate.
if (outputForFlags != AUDIO_IO_HANDLE_NONE) {
return outputForFlags;
}
if (outputForFormat != AUDIO_IO_HANDLE_NONE) {
return outputForFormat;
}
if (outputForPrimary != AUDIO_IO_HANDLE_NONE) {
return outputForPrimary;
}
return outputs[0];
}
经过上面的流程就可以筛选出最合适的output了。这里再来看看selectOutput的第二个参数有哪些定义:
// system/media/audio/include/system/audio-base.h
// Output flags: bit masks describing an output stream's capabilities and
// requirements; they can be combined (cf. the "flags:" line in the dump,
// e.g. 0x0006 = PRIMARY|FAST).
typedef enum {
AUDIO_OUTPUT_FLAG_NONE = 0, // 0x0
AUDIO_OUTPUT_FLAG_DIRECT = 1, // 0x1
AUDIO_OUTPUT_FLAG_PRIMARY = 2, // 0x2
AUDIO_OUTPUT_FLAG_FAST = 4, // 0x4
AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 8, // 0x8
AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD = 16, // 0x10
AUDIO_OUTPUT_FLAG_NON_BLOCKING = 32, // 0x20
AUDIO_OUTPUT_FLAG_HW_AV_SYNC = 64, // 0x40
AUDIO_OUTPUT_FLAG_TTS = 128, // 0x80
AUDIO_OUTPUT_FLAG_RAW = 256, // 0x100
AUDIO_OUTPUT_FLAG_SYNC = 512, // 0x200
AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO = 1024, // 0x400
AUDIO_OUTPUT_FLAG_DIRECT_PCM = 8192, // 0x2000
AUDIO_OUTPUT_FLAG_MMAP_NOIRQ = 16384, // 0x4000
AUDIO_OUTPUT_FLAG_VOIP_RX = 32768, // 0x8000
} audio_output_flags_t;
其代表的就是各种音频标识,系统中的音频路径可以通过如下命令查找:
adb shell dumpsys media.audio_policy
HW Modules dump:
- HW Module 1:
- name: primary
- handle: 10
- version: 2.0
- outputs:
output 0:
- name: primary output
- Profiles:
Profile 0:
- format: AUDIO_FORMAT_PCM_16_BIT
- sampling rates:48000
- channel masks:0x0003
// 对应selectOutput的第二个参数
- flags: 0x0006 (AUDIO_OUTPUT_FLAG_PRIMARY|AUDIO_OUTPUT_FLAG_FAST)
- Supported devices:
Device 1:
- id: 1
- tag name: Earpiece
- type: AUDIO_DEVICE_OUT_EARPIECE
4.加载设备信息
我们已经知道了创建AudioTrack时会去选择输出设备的类型,这里我们看看这些设备是怎样加载到系统的。
4.1 音频module
在《Android 音频子系统--06:解析audio_policy_configuration.xml》章节中,我们知道在AudioPolicyManager的构造方法中会去解析audio_policy_configuration.xml文件,解析出来的设备最终会被加载到系统中。
解析的/vendor/etc/audio_policy_configuration.xml配置文件同时包含:a2dp_audio_policy_configuration.xml以及usb_audio_policy_configuration.xml和r_submix_audio_policy_configuration.xml这三个xml文件,因此在primary之外还会额外加入三个hardware module(共四个)。
<!-- A2dp Audio HAL --> 蓝牙
<xi:include href="/vendor/etc/a2dp_audio_policy_configuration.xml"/>
<!-- Usb Audio HAL --> usb
<xi:include href="/vendor/etc/usb_audio_policy_configuration.xml"/>
<!-- Remote Submix Audio HAL --> 远程终端
<xi:include href="/vendor/etc/r_submix_audio_policy_configuration.xml"/>
audio_policy_configuration.xml文件中包含不同类型的module:
<module name="primary" halVersion="2.0">
<module name="a2dp" halVersion="2.0">
<module name="usb" halVersion="2.0">
<module name="r_submix" halVersion="2.0">
另外可以通过dump下media.audio_policy服务来看当前系统支持的module:
HW Modules dump:
- HW Module 1:
- name: primary
...
- HW Module 2:
- name: a2dp
...
- HW Module 3:
- name: usb
...
- HW Module 4:
- name: r_submix
这里会有关于对应module以及device的具体配置。对于每个HW Module我们对于dump数据做下初步分析:
HW Modules dump:
- HW Module 1:
- name: primary //具体的音频硬件设备类型
- handle: 10
- version: 2.0
- outputs: //由于应用层面有不同的stream,对应到配置也会有多个output
output 0:
- name: primary output //匹配到具体的stream类型
- Profiles:
Profile 0: //不同的stream也会有多个音频编码格式,因此存在多个profile
- format: AUDIO_FORMAT_PCM_16_BIT
- sampling rates:48000
- channel masks:0x0003
- flags: 0x0006 (AUDIO_OUTPUT_FLAG_PRIMARY|AUDIO_OUTPUT_FLAG_FAST)
- Supported devices://当然同样的stream也会存在多种device的输出方式(耳机,Speaker等)
Device 1:
- id: 1
- tag name: Earpiece
- type: AUDIO_DEVICE_OUT_EARPIECE
Device 2:
- id: 2
- tag name: Speaker
- type: AUDIO_DEVICE_OUT_SPEAKER
Device 3:
- tag name: Wired Headset
- type: AUDIO_DEVICE_OUT_WIRED_HEADSET
Device 4:
- tag name: Wired Headphones
- type: AUDIO_DEVICE_OUT_WIRED_HEADPHONE
每种硬件接口类型,会根据stream的应用场景区分多个output,最后我们需要在这些output中选择满足我们要求的output,再从output 中选择最终的device。
4.2 音频路径
下面具体分析:audio相关的音频路径是如何被加载到系统的,每个音频路径是如何设置其默认输出设备的,以及系统是如何将众多的output保存下来的:
// AudioPolicyManager.cpp
// Loads every HW module parsed from the policy config, opens one output
// stream per attached, non-direct output profile, and registers each opened
// output in mOutputs so later device/output selection can pick among them.
status_t AudioPolicyManager::initialize() {
// ...
// mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
// open all output streams needed to access attached devices
audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
// Walk every module
// (mHwModulesAll holds the modules seen above: primary, a2dp, usb, r_submix).
for (const auto& hwModule : mHwModulesAll) {
// Load each audio HAL library so it can be used later.
hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));
if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
ALOGW("could not open HW module %s", hwModule->getName());
continue;
}
mHwModules.push_back(hwModule);
// open all output streams needed to access attached devices
// except for direct output streams that are only opened when they are actually
// required by an app.
// This also validates mAvailableOutputDevices list
// Each OutputProfile here corresponds to one output;
// outProfile->getTagName().string() is that output's name.
for (const auto& outProfile : hwModule->getOutputProfiles()) {
if (!outProfile->canOpenNewIo()) {
ALOGE("Invalid Output profile max open count %u for profile %s",
outProfile->maxOpenCount, outProfile->getTagName().c_str());
continue;
}
// Drop this output path if no device supports it.
if (!outProfile->hasSupportedDevices()) {
ALOGW("Output profile contains no device on module %s", hwModule->getName());
continue;
}
// Note if any output path supports TTS.
if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
mTtsOutputAvailable = true;
}
// Direct (unmixed, e.g. HDMI) streams are not opened at init time, so
// they are not added to the default outputs here either.
if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
continue;
}
// Devices (Speaker, headset, ...) this output path supports.
audio_devices_t profileType = outProfile->getSupportedDevicesType();
/*
 * 1. The default output device is the Speaker.
 * 2. So: if this path's supported devices include the default device,
 *    select the default device type as profileType.
 * 3. Otherwise pick, in order, a supported device that is also among
 *    the attached output devices (headset, Speaker, earpiece, ...).
 */
if ((profileType & mDefaultOutputDevice->type()) != AUDIO_DEVICE_NONE) {
profileType = mDefaultOutputDevice->type();
} else {
// chose first device present in profile's SupportedDevices also part of
// outputDeviceTypes
profileType = outProfile->getSupportedDeviceForType(outputDeviceTypes);
}
// No usable output device on this path: give it up.
if ((profileType & outputDeviceTypes) == 0) {
continue;
}
// Build an output descriptor from the profile and the client interface.
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
// All devices supported by this output path.
const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
// Those among them matching the selected type (Speaker by default).
const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
// Address of the first matching device, or "" if there is none.
// This field is usually empty.
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
// Note (used later): openOutput() fills 'output' with a globally unique
// handle that is associated with the PlaybackThread for this stream.
// Open the output stream on this path with the chosen device.
status_t status = outputDesc->open(nullptr, profileType, address,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
ALOGW("Cannot open output stream for device %08x on hw module %s",
outputDesc->mDevice,
hwModule->getName());
} else {
for (const auto& dev : supportedDevices) {
ssize_t index = mAvailableOutputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
mAvailableOutputDevices[index]->attach(hwModule);
}
}
if (mPrimaryOutput == 0 &&
outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
mPrimaryOutput = outputDesc;
}
// Register handle + descriptor in the global mOutputs; device and
// stream type are later used to pick a suitable path from this map.
addOutput(output, outputDesc);
// Set this output path's default output device.
setOutputDevice(outputDesc,
profileType,
true,
0,
NULL,
address);
}
}
// (some code omitted)
}
outputDesc->open,调用SwAudioOutputDescriptor::open()方法,代码实现如下:
// frameworks/av/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
// Opens the underlying output stream through the policy client interface
// (which ends up in AudioFlinger::openOutput) and, on success, records the
// negotiated config, a new unique port id and the returned io handle.
// NOTE(review): this overload takes a DeviceVector, while the initialize()
// snippet above calls open(nullptr, profileType, address, ...) -- the two
// listings appear to come from different Android revisions; verify against
// the exact source tree being studied.
status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
audio_io_handle_t *output)
{
mDevices = devices;
const String8& address = devices.getFirstValidAddress();
audio_devices_t device = devices.types();
audio_config_t lConfig;
if (config == nullptr) {
// No explicit config supplied: fall back to this profile's defaults.
lConfig = AUDIO_CONFIG_INITIALIZER;
lConfig.sample_rate = mSamplingRate;
lConfig.channel_mask = mChannelMask;
lConfig.format = mFormat;
} else {
lConfig = *config;
}
// if the selected profile is offloaded and no offload info was specified,
// create a default one
if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
lConfig.offload_info = AUDIO_INFO_INITIALIZER;
lConfig.offload_info.sample_rate = lConfig.sample_rate;
lConfig.offload_info.channel_mask = lConfig.channel_mask;
lConfig.offload_info.format = lConfig.format;
lConfig.offload_info.stream_type = stream;
lConfig.offload_info.duration_us = -1;
lConfig.offload_info.has_video = true; // conservative
lConfig.offload_info.is_streaming = true; // likely
}
mFlags = (audio_output_flags_t)(mFlags | flags);
ALOGV("opening output for device %s profile %p name %s",
mDevices.toString().c_str(), mProfile.get(), mProfile->getName().string());
status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
output,
&lConfig,
&device,
address,
&mLatency,
mFlags);
LOG_ALWAYS_FATAL_IF(mDevices.types() != device,
"%s openOutput returned device %08x when given device %08x",
__FUNCTION__, mDevices.types(), device);
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
"%s openOutput returned output handle %d for device %08x",
__FUNCTION__, *output, device);
// Record the negotiated config and the new io handle.
mSamplingRate = lConfig.sample_rate;
mChannelMask = lConfig.channel_mask;
mFormat = lConfig.format;
mId = AudioPort::getNextUniqueId();
mIoHandle = *output;
mProfile->curOpenCount++;
}
return status;
}
调用到mClientInterface->openOutput,它先进入AudioPolicyService::AudioPolicyClient::openOutput,最终调用的是AudioFlinger的openOutput方法,其代码实现如下:
// frameworks/av/services/audiopolicy/service/AudioPolicyClientImpl.cpp
// Bridge from the audio policy layer to AudioFlinger: look up the
// AudioFlinger service and forward the openOutput request unchanged.
// Returns PERMISSION_DENIED if AudioFlinger cannot be reached, otherwise
// whatever AudioFlinger::openOutput() returns.
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t *devices,
const String8& address,
uint32_t *latencyMs,
audio_output_flags_t flags)
{
sp<IAudioFlinger> audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger != 0) {
// Delegate straight through; all out-parameters are filled by AudioFlinger.
return audioFlinger->openOutput(module, output, config, devices, address, latencyMs, flags);
}
ALOGW("%s: could not get AudioFlinger", __func__);
return PERMISSION_DENIED;
}
这里最后调用的是AudioFlinger的openOutput方法(该方法最终会创建一个MixerThread线程并与对应的output相关联,之后应用程序就可以把数据传递给该线程,进而传递给硬件设备)。
// frameworks/av/services/audioflinger/AudioFlinger.cpp
// Binder-facing entry point for opening a hardware output path.
// Validates the requested devices, creates (via openOutput_l) the playback
// or mmap thread bound to the new io handle, publishes the "output opened"
// io-config event, and designates the first primary output's HAL device as
// the primary hardware device.
// NOTE(review): config is dereferenced below without a null check — callers
// are expected to always pass a valid config.
status_t AudioFlinger::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t *devices,
const String8& address,
uint32_t *latencyMs,
audio_output_flags_t flags)
{
ALOGI("openOutput() this %p, module %d Device %#x, SamplingRate %d, Format %#08x, "
"Channels %#x, flags %#x",
this, module,
(devices != NULL) ? *devices : 0,
config->sample_rate,
config->format,
config->channel_mask,
flags);
if (devices == NULL || *devices == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
// Returns the newly created thread (typically a MixerThread; may be an
// Offload/Direct/Mmap thread depending on flags and config).
sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
if (thread != 0) {
if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
*latencyMs = playbackThread->latency();
// notify client processes of the new output creation
playbackThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
// the first primary output opened designates the primary hw device
if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
ALOGI("Using module %d as the primary audio interface", module);
mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;
// mHardwareStatus brackets the HAL call for dump/debug purposes.
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_MODE;
mPrimaryHardwareDev->hwDevice()->setMode(mMode);
mHardwareStatus = AUDIO_HW_IDLE;
}
} else {
// MMAP (no-IRQ) outputs get an MmapThread instead of a PlaybackThread.
MmapThread *mmapThread = (MmapThread *)thread.get();
mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
return NO_ERROR;
}
return NO_INIT;
}
继续调用openOutput_l():
// Locked helper that does the real work of opening an output: finds the
// HAL device for (module, devices), allocates a unique io handle, opens the
// HAL output stream, and creates the thread type matching the flags/config
// (Mmap / Offload / Direct / Mixer). Returns the created thread, or 0 on
// failure.
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t devices,
const String8& address,
audio_output_flags_t flags)
{
// The audio HAL libraries were already loaded while AudioPolicyManager
// initialized; this just looks up the matching device handle.
AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
if (outHwDev == NULL) {
return 0;
}
if (*output == AUDIO_IO_HANDLE_NONE) {
// Allocate a globally unique io handle for this output path.
*output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
} else {
// Audio Policy does not currently request a specific output handle.
// If this is ever needed, see openInput_l() for example code.
ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
return 0;
}
// ...
// 省略部分代码
// ...
// output: the upper-layer handle — just an id used to bind to the
// playback thread.
// outputStream (AudioStreamOut): wraps the lower-layer output backed by a
// real device; it is ultimately handed to the playback thread (e.g.
// MixerThread below).
AudioStreamOut *outputStream = NULL;
status_t status = outHwDev->openOutputStream(
&outputStream,
*output,
devices,
flags,
config,
address.string());
mHardwareStatus = AUDIO_HW_IDLE;
// Create the PlaybackThread variant matching the requested flags/config.
if (status == NO_ERROR) {
if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
sp<MmapPlaybackThread> thread =
new MmapPlaybackThread(this, *output, outHwDev, outputStream,
devices, AUDIO_DEVICE_NONE, mSystemReady);
mMmapThreads.add(*output, thread);
ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
*output, thread.get());
return thread;
} else {
sp<PlaybackThread> thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p",
*output, thread.get());
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(config->format)
|| !isValidPcmSinkChannelMask(config->channel_mask)) {
// Direct output also covers formats/masks the mixer cannot handle.
thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created direct output: ID %d thread %p",
*output, thread.get());
} else {
// Ordinary mixed PCM playback (e.g. music) typically ends up here.
thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created mixer output: ID %d thread %p",
*output, thread.get());
}
mPlaybackThreads.add(*output, thread);
return thread;
}
}
return 0;
}
这里的openOutputStream会调用到hardware了,以primary为例说明如下:
// hardware/libhardware_legacy/audio/audio_hw_hal.cpp
adev->device.open_output_stream = adev_open_output_stream;
adev_open_output_stream会设置对音频硬件进行操作的相关函数指针。这里先给出一个时序图,后面在HAL层做具体分析。