Android AudioTrack 播放流程源码分析
概述
AudioTrack是Android音频系统中用于播放音频数据的核心类,位于frameworks/base/media/java/android/media/AudioTrack.java。它支持两种数据加载模式(注意与第六章的TRANSFER_*传输模式区分):
- MODE_STREAM (流模式): 数据以流的方式持续写入,适用于长时间播放
- MODE_STATIC (静态模式): 数据一次性写入共享内存,适用于短促音效
一、AudioTrack创建流程
1.1 Java层构造函数
文件路径: frameworks/base/media/java/android/media/AudioTrack.java
// 推荐使用Builder模式创建
public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
int mode, int sessionId) throws IllegalArgumentException {
this(null /* context */, attributes, format, bufferSizeInBytes, mode, sessionId,
false /*offload*/, ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
}
关键参数验证流程 (audioParamCheck方法):
private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
int audioFormat, int mode) {
// 1. 采样率验证: SAMPLE_RATE_HZ_MIN(8000) ~ SAMPLE_RATE_HZ_MAX(192000)
// 2. 声道配置验证: 单声道、立体声或多声道
// 3. 音频格式验证: ENCODING_PCM_16BIT, ENCODING_PCM_8BIT, ENCODING_PCM_FLOAT等
// 4. 数据加载模式: MODE_STREAM 或 MODE_STATIC
}
核心初始化流程:
private AudioTrack(...) {
// 1. 参数验证
audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
audioBuffSizeCheck(bufferSizeInBytes);
// 2. 调用native_setup进行Native层初始化
int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
mNativeBufferSizeInBytes, mDataLoadMode, session,
attributionSourceState.getParcel(), 0, offload,
encapsulationMode, tunerConfiguration, getCurrentOpPackageName());
// 3. 设置状态
if (mDataLoadMode == MODE_STATIC) {
mState = STATE_NO_STATIC_DATA;
} else {
mState = STATE_INITIALIZED;
}
}
1.2 JNI层 native_setup
文件路径: frameworks/base/core/jni/android_media_AudioTrack.cpp
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jintArray jSampleRate,
jint channelPositionMask, jint channelIndexMask,
jint audioFormat, jint buffSizeInBytes, jint memoryMode,
jintArray jSession, jobject jAttributionSource,
jlong nativeAudioTrack, jboolean offload,
jint encapsulationMode, jobject tunerConfiguration,
jstring opPackageName) {
// 1. 创建JNI存储对象(用于回调)
const auto lpJniStorage = sp<AudioTrackJniStorage>::make(clazz, weak_this, offload);
// 2. 创建Native AudioTrack对象
lpTrack = sp<AudioTrack>::make(attributionSource);
// 3. 根据模式调用set方法
switch (memoryMode) {
case MODE_STREAM:
status = lpTrack->set(
AUDIO_STREAM_DEFAULT,
sampleRateInHertz,
format,
nativeChannelMask,
frameCount,
AUDIO_OUTPUT_FLAG_NONE,
lpJniStorage,
0, // notificationFrames
0, // shared mem
true, // thread can call Java
sessionId,
AudioTrack::TRANSFER_SYNC, // 同步传输模式
nullptr,
attributionSource,
paa.get());
break;
case MODE_STATIC:
// 分配共享内存
const auto iMem = allocSharedMem(buffSizeInBytes);
status = lpTrack->set(
AUDIO_STREAM_DEFAULT,
sampleRateInHertz,
format,
nativeChannelMask,
frameCount,
AUDIO_OUTPUT_FLAG_NONE,
lpJniStorage,
0,
iMem, // 共享内存
true,
sessionId,
AudioTrack::TRANSFER_SHARED, // 共享内存传输模式
nullptr,
attributionSource,
paa.get());
break;
}
// 4. 保存Native对象到Java字段
setFieldSp(env, thiz, lpTrack, javaAudioTrackFields.nativeTrackInJavaObj);
setFieldSp(env, thiz, lpJniStorage, javaAudioTrackFields.jniData);
}
1.3 Native层 AudioTrack::set()
文件路径: frameworks/av/media/libaudioclient/AudioTrack.cpp
status_t AudioTrack::set(
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
audio_output_flags_t flags,
const wp<IAudioTrackCallback>& callback,
int32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
const AttributionSourceState& attributionSource,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
float maxRequiredSpeed,
audio_port_handle_t selectedDeviceId) {
// 1. 确定传输类型
switch (transferType) {
case TRANSFER_DEFAULT:
if (sharedBuffer != 0) {
transferType = TRANSFER_SHARED;
} else if (callback == nullptr || threadCanCallJava) {
transferType = TRANSFER_SYNC;
} else {
transferType = TRANSFER_CALLBACK;
}
break;
// ... 其他case
}
// 2. 如果有回调,创建AudioTrackThread
if (callback != nullptr) {
mAudioTrackThread = sp<AudioTrackThread>::make(*this);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0);
}
// 3. 创建IAudioTrack(与AudioFlinger建立连接)
{
AutoMutex lock(mLock);
status = createTrack_l();
}
}
1.4 createTrack_l() - 与AudioFlinger建立连接
文件路径: frameworks/av/media/libaudioclient/AudioTrack.cpp
status_t AudioTrack::createTrack_l() {
// 1. 获取AudioFlinger服务
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
// 2. 构建输入参数
IAudioFlinger::CreateTrackInput input;
input.attr = mAttributes;
input.config.sample_rate = mSampleRate;
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
input.sharedBuffer = mSharedBuffer;
input.flags = mFlags;
input.frameCount = mReqFrameCount;
input.sessionId = mSessionId;
// 3. 调用AudioFlinger创建Track
media::CreateTrackResponse response;
status = audioFlinger->createTrack(VALUE_OR_FATAL(input.toAidl()), response);
// 将AIDL响应转换为legacy输出结构
IAudioFlinger::CreateTrackOutput output =
        VALUE_OR_FATAL(IAudioFlinger::CreateTrackOutput::fromAidl(response));
// 4. 获取返回的IAudioTrack和共享内存控制块
mAudioTrack = output.audioTrack;
// 获取控制块共享内存
output.audioTrack->getCblk(&sfr);
sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->unsecurePointer());
mCblk = cblk;
// 5. 创建ClientProxy(用于客户端写入数据)
if (mSharedBuffer == 0) {
mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
} else {
mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
mProxy = mStaticProxy;
}
// 6. 注册死亡通知
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
}
1.5 AudioFlinger端 Track创建
文件路径: frameworks/av/services/audioflinger/Tracks.cpp
// TrackBase构造函数
AudioFlinger::ThreadBase::TrackBase::TrackBase(
ThreadBase *thread,
const sp<Client>& client,
const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
size_t bufferSize,
audio_session_t sessionId,
pid_t creatorPid,
uid_t clientUid,
bool isOut,
alloc_type alloc,
track_type type,
audio_port_handle_t portId,
std::string metricsId)
{
// 1. 分配共享内存(控制块 + 缓冲区)
size_t size = sizeof(audio_track_cblk_t) + bufferSize;
mCblkMemory = client->allocator().allocate(NamedAllocRequest{{size}, ...});
mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer());
// 2. 构造控制块
new(mCblk) audio_track_cblk_t();
// 3. 设置缓冲区指针
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
}
二、数据写入流程
2.1 同步写入模式 (TRANSFER_SYNC)
Java层 write()
// AudioTrack.java
public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
@WriteMode int writeMode) {
if (mState == STATE_UNINITIALIZED) {
return ERROR_INVALID_OPERATION;
}
// 调用native方法
return native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, writeMode);
}
JNI层
// android_media_AudioTrack.cpp
template <typename T>
static jint android_media_AudioTrack_writeArray(JNIEnv *env, jobject thiz,
T javaAudioData,
jint offsetInSamples, jint sizeInSamples,
jint javaAudioFormat,
jboolean isWriteBlocking) {
sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
// 获取数组指针
auto cAudioData = envGetArrayElements(env, javaAudioData, NULL);
// 调用Native write
jint samplesWritten = writeToTrack(lpTrack, javaAudioFormat, cAudioData,
offsetInSamples, sizeInSamples, isWriteBlocking == JNI_TRUE);
envReleaseArrayElements(env, javaAudioData, cAudioData, 0);
return samplesWritten;
}
template <typename T>
static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
jint offsetInSamples, jint sizeInSamples, bool blocking) {
ssize_t written = 0;
size_t sizeInBytes = sizeInSamples * sizeof(T);
if (track->sharedBuffer() == 0) {
// MODE_STREAM: 调用AudioTrack::write
written = track->write(data + offsetInSamples, sizeInBytes, blocking);
} else {
// MODE_STATIC: 直接复制到共享内存
memcpy(track->sharedBuffer()->unsecurePointer(), data + offsetInSamples, sizeInBytes);
written = sizeInBytes;
}
return written / sizeof(T);
}
Native层 AudioTrack::write()
// AudioTrack.cpp
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking) {
// 验证传输模式
if (mTransfer != TRANSFER_SYNC && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
return INVALID_OPERATION;
}
size_t written = 0;
Buffer audioBuffer;
while (userSize >= mFrameSize) {
audioBuffer.frameCount = userSize / mFrameSize;
// 1. 获取缓冲区
status_t err = obtainBuffer(&audioBuffer,
blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
if (err < 0) {
if (written > 0) break;
return ssize_t(err);
}
// 2. 复制数据到共享缓冲区
size_t toWrite = audioBuffer.size();
memcpy(audioBuffer.raw, buffer, toWrite);
buffer = ((const char *) buffer) + toWrite;
userSize -= toWrite;
written += toWrite;
// 3. 释放缓冲区(通知服务端)
releaseBuffer(&audioBuffer);
}
// 更新写入帧计数
if (written > 0) {
mFramesWritten += written / mFrameSize;
}
return written;
}
2.2 obtainBuffer/releaseBuffer 机制
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
struct timespec *elapsed, size_t *nonContig) {
// 通过Proxy获取可用缓冲区
Proxy::Buffer buffer;
sp<AudioTrackClientProxy> proxy;
{
AutoMutex lock(mLock);
proxy = mProxy;
// 非活动状态时非阻塞
if (mState != STATE_ACTIVE) {
requested = &ClientProxy::kNonBlocking;
}
}
buffer.mFrameCount = audioBuffer->frameCount;
status = proxy->obtainBuffer(&buffer, requested, elapsed);
audioBuffer->frameCount = buffer.mFrameCount;
audioBuffer->mSize = buffer.mFrameCount * mFrameSize;
audioBuffer->raw = buffer.mRaw;
return status;
}
void AudioTrack::releaseBuffer(const Buffer* audioBuffer) {
size_t stepCount = audioBuffer->mSize / mFrameSize;
Proxy::Buffer buffer;
buffer.mFrameCount = stepCount;
buffer.mRaw = audioBuffer->raw;
AutoMutex lock(mLock);
mReleased += stepCount;
mInUnderrun = false;
mProxy->releaseBuffer(&buffer);
}
2.3 回调模式 (TRANSFER_CALLBACK)
回调线程 AudioTrackThread
// AudioTrack.cpp
class AudioTrackThread : public Thread {
virtual bool threadLoop() {
nsecs_t ns = processAudioBuffer();
if (ns == NS_NEVER) {
return false;
}
if (ns > 0) {
Mutex::Autolock _l(mMyLock);
mMyCond.waitRelative(mMyLock, ns);
}
return true;
}
};
processAudioBuffer() 回调处理
nsecs_t AudioTrack::processAudioBuffer() {
mLock.lock();
sp<IAudioTrackCallback> callback = mCallback.promote();
// 获取标志和位置
int32_t flags = android_atomic_and(
~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
&mCblk->mFlags);
// 处理Underrun事件
if (flags & CBLK_UNDERRUN) {
callback->onUnderrun();
}
// 处理Marker事件
Modulo<uint32_t> position(updateAndGetPosition_l());
if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
mMarkerReached = true;
callback->onMarker(markerPosition.value());
}
// 处理周期位置更新
if (updatePeriod > 0 && position >= newPosition) {
callback->onNewPos(newPosition.value());
mNewPosition += updatePeriod;
}
// 处理EVENT_MORE_DATA(回调模式的核心)
if (mTransfer == TRANSFER_CALLBACK) {
Buffer buffer;
buffer.frameCount = mRemainingFrames;
status = obtainBuffer(&buffer, ...);
if (status == NO_ERROR) {
size_t written = callback->onMoreData(buffer);
releaseBuffer(&buffer);
}
}
return nextTimeout;
}
2.4 数据流转路径图
┌─────────────────────────────────────────────────────────────────────────┐
│ Application Layer │
│ ┌─────────────────────────────────────────────────────────────────┐ │
│ │ AudioTrack.write(byte[] audioData) │ │
│ └──────────────────────────────┬──────────────────────────────────┘ │
└─────────────────────────────────┼───────────────────────────────────────┘
│ JNI
▼
┌─────────────────────────────────────────────────────────────────────────┐
│ Native Layer │
│ ┌─────────────────────────────────────────────────────────────────┐ │
│ │ AudioTrack::write() │ │
│ │ └── obtainBuffer() ──→ AudioTrackClientProxy::obtainBuffer() │ │
│ │ └── memcpy() │ │
│ │ └── releaseBuffer() ──→ AudioTrackClientProxy::releaseBuffer()│ │
│ └──────────────────────────────┬──────────────────────────────────┘ │
└─────────────────────────────────┼───────────────────────────────────────┘
│ Shared Memory
▼
┌─────────────────────────────────────────────────────────────────────────┐
│ AudioFlinger │
│ ┌─────────────────────────────────────────────────────────────────┐ │
│ │ PlaybackThread::Track │ │
│ │ └── ServerProxy::obtainBuffer() │ │
│ │ └── getNextBuffer() ──→ MixerThread │ │
│ └──────────────────────────────┬──────────────────────────────────┘ │
└─────────────────────────────────┼───────────────────────────────────────┘
│
▼
┌─────────────────────────────────────────────────────────────────────────┐
│ Audio HAL │
│ ┌─────────────────────────────────────────────────────────────────┐ │
│ │ AudioStreamOut::write() │ │
│ └─────────────────────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────────────┘
三、播放控制
3.1 play() 流程
Java层
// AudioTrack.java
public void play() throws IllegalStateException {
if (mState != STATE_INITIALIZED) {
throw new IllegalStateException("play() called on uninitialized AudioTrack.");
}
synchronized(mPlayStateLock) {
baseStart(0);
native_start();
mPlayState = PLAYSTATE_PLAYING;
}
}
Native层
// AudioTrack.cpp
status_t AudioTrack::start() {
AutoMutex lock(mLock);
if (mState == STATE_ACTIVE) {
return INVALID_OPERATION;
}
State previousState = mState;
mState = STATE_ACTIVE;
// 重置位置
if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
mPosition = 0;
mFramesWritten = 0;
mProxy->clearTimestamp();
mMarkerReached = false;
}
// 调用IAudioTrack::start
int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED),
&mCblk->mFlags);
if (!(flags & CBLK_INVALID)) {
mAudioTrack->start(&status);
}
// 恢复回调线程
sp<AudioTrackThread> t = mAudioTrackThread;
if (status == NO_ERROR && t != 0) {
t->resume();
}
return status;
}
3.2 pause() 流程
void AudioTrack::pause() {
AutoMutex lock(mLock);
if (mState == STATE_ACTIVE) {
mState = STATE_PAUSED;
} else {
return;
}
mProxy->interrupt();
mAudioTrack->pause();
// 对于offload模式,缓存当前位置
if (isOffloaded_l()) {
AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
}
}
3.3 stop() 流程
void AudioTrack::stop() {
AutoMutex lock(mLock);
if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
return;
}
if (isOffloaded_l()) {
mState = STATE_STOPPING; // 等待STREAM_END事件
} else {
mState = STATE_STOPPED;
mReleased = 0;
}
mProxy->stop();
mProxy->interrupt();
mAudioTrack->stop();
// 暂停回调线程
sp<AudioTrackThread> t = mAudioTrackThread;
if (t != 0 && !isOffloaded_l()) {
t->pause();
}
}
3.4 flush() 流程
void AudioTrack::flush() {
AutoMutex lock(mLock);
// 不能在活动状态flush
if (mSharedBuffer != 0 || mState == STATE_ACTIVE) {
return;
}
flush_l();
}
void AudioTrack::flush_l() {
// 清除标记和周期更新计数器
mMarkerPosition = 0;
mMarkerReached = false;
mUpdatePeriod = 0;
mRefreshRemaining = true;
mState = STATE_FLUSHED;
mReleased = 0;
mProxy->flush();
mAudioTrack->flush();
}
3.5 播放状态转换图
┌──────────────┐
│ CREATED │
└──────┬───────┘
│ start()
▼
┌─────────────┐ ┌──────────────┐ ┌─────────────┐
│ PAUSED │◄────│ ACTIVE │────►│ STOPPING │
└──────┬──────┘ └──────┬───────┘ └──────┬──────┘
│ │ │
│ pause() │ stop() │ STREAM_END
│ │ │ (offload only)
│ ▼ ▼
│ ┌──────────────┐ ┌─────────────┐
└───────────►│ STOPPED │◄────│ PAUSED_ │
└──────┬───────┘ │ STOPPING │
│ └─────────────┘
│ flush()
▼
┌──────────────┐
│ FLUSHED │
└──────────────┘
四、回调机制
4.1 回调接口定义
Native层 (frameworks/av/media/libaudioclient/include/media/AudioTrack.h):
class IAudioTrackCallback : public virtual RefBase {
protected:
// 请求写入更多数据(TRANSFER_CALLBACK模式)
virtual size_t onMoreData(const AudioTrack::Buffer& buffer) { return 0; }
// Underrun发生
virtual void onUnderrun() {}
// 循环结束
virtual void onLoopEnd(int32_t loopsRemaining) {}
// 到达标记位置
virtual void onMarker(uint32_t markerPosition) {}
// 新位置通知
virtual void onNewPos(uint32_t newPos) {}
// 静态缓冲区播放完成
virtual void onBufferEnd() {}
// IAudioTrack重建
virtual void onNewIAudioTrack() {}
// 流结束(offload模式)
virtual void onStreamEnd() {}
};
4.2 JNI层回调实现
文件路径: frameworks/base/core/jni/android_media_AudioTrack.cpp
class AudioTrackCallbackImpl : public AudioTrack::IAudioTrackCallback {
public:
enum event_type {
EVENT_MORE_DATA = 0,
EVENT_UNDERRUN = 1,
EVENT_LOOP_END = 2,
EVENT_MARKER = 3,
EVENT_NEW_POS = 4,
EVENT_BUFFER_END = 5,
EVENT_NEW_IAUDIOTRACK = 6,
EVENT_STREAM_END = 7,
// 注: 事件8(时间戳更新事件)本文未涉及,故此处跳号
EVENT_CAN_WRITE_MORE_DATA = 9
};
void onMarker(uint32_t markerPosition) override {
postEvent(EVENT_MARKER);
}
void onNewPos(uint32_t newPos) override {
postEvent(EVENT_NEW_POS);
}
void onStreamEnd() override {
if (!mIsOffload) return;
postEvent(EVENT_STREAM_END);
}
private:
void postEvent(int event, int arg = 0) {
auto env = getJNIEnvOrDie();
// 调用Java层的静态方法
env->CallStaticVoidMethod(
mAudioTrackClass,
javaAudioTrackFields.postNativeEventInJava,
mAudioTrackWeakRef, event, arg, 0, NULL);
}
};
4.3 Java层回调处理
// AudioTrack.java
public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
Handler handler) {
if (listener != null) {
mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
} else {
mEventHandlerDelegate = null;
}
}
// 内部委托类处理Native事件
private class NativePositionEventHandlerDelegate {
private final Handler mHandler;
NativePositionEventHandlerDelegate(final AudioTrack audioTrack,
final OnPlaybackPositionUpdateListener listener, Handler handler) {
mHandler = new Handler(handler.getLooper()) {
@Override
public void handleMessage(Message msg) {
switch(msg.what) {
case NATIVE_EVENT_MARKER:
listener.onMarkerReached(audioTrack);
break;
case NATIVE_EVENT_NEW_POS:
listener.onPeriodicNotification(audioTrack);
break;
}
}
};
}
// 从Native层接收事件
@SuppressWarnings("unused")
private void postEventFromNative(int what, int arg1, int arg2, Object obj) {
Message m = mHandler.obtainMessage(what, arg1, arg2, obj);
mHandler.sendMessage(m);
}
}
4.4 事件触发时机
| 事件类型 | 触发时机 | 适用模式 |
|---|---|---|
| EVENT_MORE_DATA | 需要更多音频数据填充缓冲区 | TRANSFER_CALLBACK |
| EVENT_UNDERRUN | 缓冲区欠载发生 | STREAM模式 |
| EVENT_LOOP_END | 循环播放到达结束点 | STATIC模式 |
| EVENT_MARKER | 播放位置到达预设标记 | 所有模式 |
| EVENT_NEW_POS | 周期性位置更新 | 所有模式 |
| EVENT_BUFFER_END | 静态缓冲区播放完成 | STATIC模式 |
| EVENT_NEW_IAUDIOTRACK | IAudioTrack被重建 | 所有模式 |
| EVENT_STREAM_END | 所有缓冲数据播放完成 | Offload模式 |
| EVENT_CAN_WRITE_MORE_DATA | 可以写入更多数据 | TRANSFER_SYNC_NOTIF_CALLBACK |
五、关键数据结构
5.1 audio_track_cblk_t (控制块)
// AudioTrackShared.h
struct audio_track_cblk_t {
// 帧计数器
std::atomic<int32_t> mFrameCount; // 用户写入的帧数
std::atomic<int32_t> mStepCount; // 服务端消耗的帧数
// 状态标志
std::atomic<uint32_t> mFlags; // CBLK_标志位
// 其他字段
uint32_t mServer; // 服务端位置
uint32_t mClient; // 客户端位置
uint32_t mBufferSize; // 缓冲区大小
uint8_t mOut; // 输出/输入标志
uint8_t mStreaming; // 流/静态模式
// Futex用于等待/唤醒
std::atomic<int32_t> mFutex; // 用于同步
};
5.2 AudioTrackClientProxy
// ClientProxy.h
class AudioTrackClientProxy : public ClientProxy {
public:
AudioTrackClientProxy(audio_track_cblk_t* cblk, void* buffers,
size_t frameCount, size_t frameSize);
// 获取可用缓冲区
status_t obtainBuffer(Buffer* buffer, const struct timespec* requested);
// 释放缓冲区
void releaseBuffer(Buffer* buffer);
// 设置音量
void setVolumeLR(gain_minifloat_packed_t volume);
// 设置采样率
void setSampleRate(uint32_t sampleRate);
// 设置播放速率
void setPlaybackRate(const AudioPlaybackRate& playbackRate);
// 刷新
void flush();
// 中断等待
void interrupt();
};
5.3 AudioTrack::Buffer
class Buffer {
public:
size_t size() const { return mSize; }
size_t getFrameCount() const { return frameCount; }
uint8_t* data() const { return ui8; }
size_t frameCount; // 帧数
private:
size_t mSize; // 字节数
union {
void* raw;
int16_t* i16; // 16位PCM
uint8_t* ui8; // 8位PCM
};
uint32_t sequence; // IAudioTrack序列号
};
六、传输模式对比
| 模式 | 描述 | 使用场景 | 数据流向 |
|---|---|---|---|
| TRANSFER_SYNC | 同步write() | MODE_STREAM默认模式 | App → write() → SharedMem |
| TRANSFER_CALLBACK | 回调模式 | 需要精确控制时序 | App ← onMoreData() ← AudioTrackThread |
| TRANSFER_OBTAIN | obtain/release | 底层库使用 | App → obtainBuffer() → releaseBuffer() |
| TRANSFER_SHARED | 共享内存 | MODE_STATIC模式 | App → memcpy() → SharedMem |
| TRANSFER_SYNC_NOTIF_CALLBACK | 同步+通知 | Offload模式 | App → write() + onCanWriteMoreData() |
七、与AudioFlinger的交互
7.1 IAudioTrack接口
// IAudioTrack.aidl
interface IAudioTrack {
void start(out int status);
void stop();
void flush();
void pause();
SharedFileRegion getCblk();
void attachAuxEffect(int effectId, out int status);
void setVolume(float left, float right, out int status);
void setPlaybackRateParameters(in AudioPlaybackRate playbackRate, out int status);
AudioPlaybackRate getPlaybackRateParameters(out int status);
void setDualMonoMode(in AudioDualMonoMode mode, out int status);
AudioDualMonoMode getDualMonoMode(out int status);
}
7.2 TrackHandle (BnAudioTrack实现)
// Tracks.cpp
class TrackHandle : public BnAudioTrack {
public:
TrackHandle(const sp<PlaybackThread::Track>& track);
Status start(int32_t* _aidl_return) override {
*_aidl_return = mTrack->start();
return Status::ok();
}
Status stop() override {
mTrack->stop();
return Status::ok();
}
Status flush() override {
mTrack->flush();
return Status::ok();
}
Status pause() override {
mTrack->pause();
return Status::ok();
}
private:
sp<PlaybackThread::Track> mTrack;
};
八、总结
8.1 核心类关系图
┌────────────────────────────────────────────────────────────────────┐
│ Java Layer │
│ ┌─────────────────┐ ┌─────────────────────────────────────┐ │
│ │ AudioTrack │───►│ OnPlaybackPositionUpdateListener │ │
│ └────────┬────────┘ └─────────────────────────────────────┘ │
└───────────┼────────────────────────────────────────────────────────┘
│ JNI
▼
┌────────────────────────────────────────────────────────────────────┐
│ Native Layer │
│ ┌─────────────────┐ ┌─────────────────┐ ┌──────────────┐ │
│ │ AudioTrack │───►│AudioTrackThread │ │AudioTrackJni │ │
│ │ │ │ (callback) │ │ Storage │ │
│ └────────┬────────┘ └─────────────────┘ └──────────────┘ │
│ │ │
│ ▼ │
│ ┌─────────────────────────────────────────────────────────────┐ │
│ │ AudioTrackClientProxy │ │
│ │ ┌────────────────┐ ┌───────────────────────────────┐ │ │
│ │ │audio_track_cblk│ │ Shared Buffer │ │ │
│ │ │ (Control) │ │ (Audio Data) │ │ │
│ │ └────────────────┘ └───────────────────────────────┘ │ │
│ └─────────────────────────────────────────────────────────────┘ │
└───────────┬────────────────────────────────────────────────────────┘
│ Binder (IAudioTrack)
▼
┌────────────────────────────────────────────────────────────────────┐
│ AudioFlinger │
│ ┌─────────────────────────────────────────────────────────────┐ │
│ │ PlaybackThread │ │
│ │ ┌────────────────┐ ┌────────────────────────────────┐ │ │
│ │ │ Track │───►│ ServerProxy │ │ │
│ │ │ (TrackHandle) │ │ │ │ │
│ │ └────────────────┘ └────────────────────────────────┘ │ │
│ │ │ │ │
│ │ ▼ │ │
│ │ ┌──────────────────────────────────────────────────────┐ │ │
│ │ │ AudioMixer │ │ │
│ │ └──────────────────────────────────────────────────────┘ │ │
│ └─────────────────────────────────────────────────────────────┘ │
└────────────────────────────────────────────────────────────────────┘
8.2 关键流程总结
- 创建流程: Java构造 → JNI native_setup → Native AudioTrack::set → createTrack_l → AudioFlinger创建Track
- 数据写入: Java write → JNI writeToTrack → Native AudioTrack::write → obtainBuffer → memcpy → releaseBuffer
- 播放控制: play/pause/stop → JNI → Native AudioTrack → IAudioTrack → AudioFlinger Track
- 回调机制: AudioTrackThread::threadLoop → processAudioBuffer → IAudioTrackCallback → JNI → Java Listener
8.3 核心源码文件
| 文件路径 | 功能 |
|---|---|
| frameworks/base/media/java/android/media/AudioTrack.java | Java层API |
| frameworks/base/core/jni/android_media_AudioTrack.cpp | JNI桥接层 |
| frameworks/av/media/libaudioclient/AudioTrack.cpp | Native客户端实现 |
| frameworks/av/media/libaudioclient/include/media/AudioTrack.h | Native接口定义 |
| frameworks/av/services/audioflinger/Tracks.cpp | AudioFlinger端Track实现 |
| frameworks/av/media/libaudioclient/AudioTrackShared.cpp | 共享内存代理 |