Android Audio Subsystem -- 07: AudioTrack Examples in Detail


This article is reposted from: Android Framework 音频子系统(05)AudioTrack使用案例

Based on Android 9.0.

1. Overview

This installment analyzes two test programs (shared_mem_test at the C++ layer and MediaAudioTrackTest.java at the Java layer), and then traces the call flow from the Java-layer AudioTrack down to the C++-layer AudioTrack (the constructor flow and the write flow).

2. Analysis of the shared_mem_test Test Program

  The entry point of the shared_mem_test program is the main function:

// frameworks/base/media/tests/audiotests/shared_mem_test.cpp
int main() {

    return android::main();
}

Continuing with android::main() (defined inside namespace android in the same source file), the implementation is as follows:

// frameworks/base/media/tests/audiotests/shared_mem_test.cpp
int main() {
    ProcessState::self()->startThreadPool();   // start the binder thread pool
    AudioTrackTest *test;

    test = new AudioTrackTest();
    test->Execute();
    delete test;

    return 0;
}

Two key points are analyzed here: the AudioTrackTest constructor and its Execute method.

2.1 The AudioTrackTest Constructor

// frameworks/base/media/tests/audiotests/shared_mem_test.cpp
AudioTrackTest::AudioTrackTest(void) {

    InitSine();         // init sine table

}

Continuing with InitSine, the implementation is as follows:

// frameworks/base/media/tests/audiotests/shared_mem_test.cpp
void AudioTrackTest::InitSine(void) {
    double phi = 0;
    double dPhi = 2 * M_PI / SIN_SZ;   // one full sine period across SIN_SZ entries
    for(int i0 = 0; i0 < SIN_SZ; i0++) {
        long d0;

        d0 = 32768. * sin(phi);        // scale to the 16-bit sample range
        phi += dPhi;
        if(d0 >= 32767) d0 = 32767;    // clamp to signed 16-bit limits
        if(d0 <= -32768) d0 = -32768;
        sin1024[i0] = (short)d0;
    }
}

This builds the sample data for the waveform (one full sine period in 16-bit samples), preparing for the next step.

2.2 The Execute Method of AudioTrackTest

// frameworks/base/media/tests/audiotests/shared_mem_test.cpp
void AudioTrackTest::Execute(void) {
    if (Test01() == 0) {
        ALOGD("01 passed\n");
    } else {
        ALOGD("01 failed\n");
    }
}

Continuing with Test01, the implementation is as follows:

// frameworks/base/media/tests/audiotests/shared_mem_test.cpp
int AudioTrackTest::Test01() {

    sp<MemoryDealer> heap;
    sp<IMemory> iMem;
    uint8_t* p;

    short smpBuf[BUF_SZ];
    long rate = 44100;
    unsigned long phi;
    unsigned long dPhi;
    long amplitude;
    long freq = 1237;
    float f0;

    f0 = pow(2., 32.) * freq / (float)rate;   // 32-bit fixed-point phase increment: 2^32 * freq / rate
    dPhi = (unsigned long)f0;
    amplitude = 1000;
    phi = 0;
    Generate(smpBuf, BUF_SZ, amplitude, phi, dPhi);  // fill buffer

    for (int i = 0; i < 1024; i++) {
        heap = new MemoryDealer(1024*1024, "AudioTrack Heap Base");

        iMem = heap->allocate(BUF_SZ*sizeof(short));

        p = static_cast<uint8_t*>(iMem->pointer());
        memcpy(p, smpBuf, BUF_SZ*sizeof(short));

        // Key point 1: create an AudioTrack object and hand it the data-filled buffer
        sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC,// stream type
               rate,
               AUDIO_FORMAT_PCM_16_BIT,// word length, PCM
               AUDIO_CHANNEL_OUT_MONO,
               iMem);

        status_t status = track->initCheck();
        if(status != NO_ERROR) {
            track.clear();
            ALOGD("Failed for initCheck()");
            return -1;
        }

        // Key point 2: start playback
        ALOGD("start");
        track->start();

        usleep(20000);

        ALOGD("stop");
        track->stop();
        iMem.clear();
        heap.clear();
        usleep(20000);
    }

    return 0;

}
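Test01 fills smpBuf by calling Generate(), whose implementation is not reproduced in this article. Below is a minimal sketch of what such a generator looks like, assuming a 32-bit fixed-point phase accumulator whose top 10 bits index the 1024-entry sin1024 table built by InitSine; the signature and exact scaling are illustrative, not copied from the source:

// Sketch only: phase-accumulator tone generation against the sin1024 table.
// dPhi was computed above as 2^32 * freq / rate, so phi sweeps through
// exactly "freq" periods of the table per second of output.
void AudioTrackTest::Generate(short *buffer, long bufferSz, long amplitude,
                              unsigned long &phi, unsigned long dPhi) {
    for (long i = 0; i < bufferSz; i++) {
        // top 10 bits of the 32-bit phase select one of the 1024 table entries
        long sample = (amplitude * sin1024[(phi >> 22) & 0x3ff]) >> 15;
        buffer[i] = (short)sample;
        phi += dPhi;   // unsigned overflow wraps the phase naturally
    }
}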

From the analysis above, the usage flow of the native-layer AudioTrack is:

  • Build the parameters and pass them to the AudioTrack constructor; note that iMem here is a buffer that already contains the data.
  • Call start to begin playback.

3. Analysis of the MediaAudioTrackTest Test Program

  MediaAudioTrackTest contains many test cases; here we analyze one of them. The implementation is as follows:

// frameworks/base/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/audio/MediaAudioTrackTest.java
public class MediaAudioTrackTest extends ActivityInstrumentationTestCase2<MediaFrameworkTest> {    
    private String TAG = "MediaAudioTrackTest";
    //...
    //Test case 1: setPlaybackHeadPosition() on playing track
    @LargeTest
    public void testSetPlaybackHeadPositionPlaying() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionPlaying";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        
        //-------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        // Key point 1: build the parameters and pass them to the AudioTrack object
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 
                2*minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        //--------    test        --------------
        assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, 0, data.length);
        track.write(data, 0, data.length);
        // Key point 2: play
        track.play();
        assertTrue(TEST_NAME,
                track.setPlaybackHeadPosition(10) == AudioTrack.ERROR_INVALID_OPERATION);
        //-------- tear down      --------------
        track.release();
    }
    //...
}

From the analysis above, the usage flow of the Java-layer AudioTrack is:

  • Build the parameters and pass them to the AudioTrack constructor.

  • Call write to feed in the data; this plays the role of iMem in shared_mem_test.

  • Call play() to begin playback (a native stream-mode equivalent is sketched below).
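For comparison with the native flow in section 2, here is a minimal sketch of the same stream-mode flow written directly against the C++ AudioTrack API. playPcmStream is a hypothetical helper and the signatures assume Android 9; this is an illustration, not code from the test programs:

#include <media/AudioTrack.h>

using namespace android;

// Sketch only: play a buffer of 16-bit mono PCM in stream mode.
status_t playPcmStream(const short *pcm, size_t nFrames, uint32_t sampleRate) {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,
            sampleRate,
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_MONO,
            0 /* frameCount: let AudioTrack pick a default */);
    status_t status = track->initCheck();
    if (status != NO_ERROR) {
        return status;   // construction (and the internal set()) failed
    }
    track->start();
    // Blocking write: the native counterpart of the Java track.write() above;
    // data is copied into the FIFO that the playback thread drains.
    track->write(pcm, nFrames * sizeof(short));
    track->stop();
    return NO_ERROR;
}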

  Next we analyze the call path from the Java-layer AudioTrack to the C++-layer AudioTrack: first the constructor, then the write function.

4. AudioTrack from the Java Layer to the C++ Layer

4.1 Constructor Analysis

  Creating the Java-layer AudioTrack object ultimately also creates a native-layer AudioTrack object. Looking at the Java-layer AudioTrack, the implementation is as follows:

// frameworks/base/media/java/android/media/AudioTrack.java
private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // ......
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }
        // ......
}

Per the JNI method mapping:

// frameworks/base/core/jni/android_media_AudioTrack.cpp
{"native_setup",     "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[IJZ)I",
                                         (void *)android_media_AudioTrack_setup},

Continuing into the implementation of android_media_AudioTrack_setup, the code is as follows:

// frameworks/base/core/jni/android_media_AudioTrack.cpp
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack, jboolean offload) {
        // ......
        // Key point 1: create the native AudioTrack object
        lpTrack = new AudioTrack();
        // ...
        // code omitted
        // ...
        status_t status = NO_ERROR;
        // Parameters are set differently for the two modes, MODE_STREAM and MODE_STATIC
        switch (memoryMode) {
        case MODE_STREAM:

            // Key point 2: call set() with MODE_STREAM parameters
            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    0,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SYNC,
                    offload ? &offloadInfo : NULL,
                    -1, -1,                       // default uid, pid values
                    paa);
            break;

        case MODE_STATIC:
            // AudioTrack is using shared memory

            // The app side allocates the shared memory
            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            // Key point 2: call set() with MODE_STATIC parameters
            status = lpTrack->set(
                    AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                    sampleRateInHertz,
                    format,// word length, PCM
                    nativeChannelMask,
                    frameCount,
                    AUDIO_OUTPUT_FLAG_NONE,
                    audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                    0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                    lpJniStorage->mMemBase,// shared mem
                    true,// thread can call Java
                    sessionId,// audio session ID
                    AudioTrack::TRANSFER_SHARED,
                    NULL,                         // default offloadInfo
                    -1, -1,                       // default uid, pid values
                    paa);
            break;
        // ...
        // code omitted
        // ...
}
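In the MODE_STATIC branch, allocSharedMem() is what creates the app-side shared memory that writeToTrack (section 4.2) later copies into. In Android 9, AudioTrackJniStorage::allocSharedMem is essentially the following (lightly simplified here):

// frameworks/base/core/jni/android_media_AudioTrack.cpp (simplified)
// Allocate an anonymous shared-memory heap and wrap it in an IMemory
// (mMemBase), which is then passed to AudioTrack::set() as the shared buffer.
bool allocSharedMem(int sizeInBytes) {
    mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
    if (mMemHeap->getHeapID() < 0) {
        return false;   // ashmem allocation failed
    }
    mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
    return true;
}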

Two key operations are performed here: creating the C++-layer AudioTrack object and calling its set method. The constructor of the C++-layer AudioTrack looks like this:

// frameworks/av/media/libaudioclient/AudioTrack.cpp
// Parameterless constructor; set() must be called afterwards
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
// ---------------
// Parameterized constructor; no separate set() call is needed
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    // The constructor calls set() automatically
    (void)set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}

As we can see, AudioTrack can be constructed in two ways:

  • The parameterless constructor, after which set() must be called to configure the parameters.
  • The parameterized constructor, which calls set() directly to configure the parameters (both patterns are sketched below).
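A minimal sketch of the two patterns (parameter values are illustrative, and set() takes many more defaulted parameters than shown here):

// Pattern 1: what the JNI layer does -- default-construct, then configure.
sp<AudioTrack> track1 = new AudioTrack();
status_t status = track1->set(
        AUDIO_STREAM_MUSIC,        // stream type
        44100,                     // sample rate in Hz
        AUDIO_FORMAT_PCM_16_BIT,   // sample format
        AUDIO_CHANNEL_OUT_MONO);   // channel mask; remaining args keep defaults

// Pattern 2: what shared_mem_test does -- a parameterized constructor
// (here the IMemory overload), which calls set() internally.
sp<AudioTrack> track2 = new AudioTrack(
        AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
        AUDIO_CHANNEL_OUT_MONO, iMem);   // iMem: app-side shared buffer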

  To summarize: playing a sound always requires creating an AudioTrack object, and creating the Java AudioTrack causes the C++ AudioTrack to be created as well. The core of the analysis is therefore the C++ AudioTrack class, and the key function involved in creating one is set. Also note that AudioTrack only determines the attributes of the sound; it does not by itself determine which device the sound plays from.

4.2 The write Function

  There are several write overloads here, as follows:

// frameworks/base/media/java/android/media/AudioTrack.java
public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
    //...
    int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
            true /*isBlocking*/);
    //...
    return ret;
}
 
public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
    //...
    int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
    //...
    return ret;
}
 
public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
        @WriteMode int writeMode) {
    //...
    int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
            writeMode == WRITE_BLOCKING);
    //...
    return ret;
}
 
public int write(ByteBuffer audioData, int sizeInBytes,
        @WriteMode int writeMode) {
    int ret = 0;
    if (audioData.isDirect()) {
        ret = native_write_native_bytes(audioData,
                audioData.position(), sizeInBytes, mAudioFormat,
                writeMode == WRITE_BLOCKING);
    } else {
        ret = native_write_byte(NioUtils.unsafeArray(audioData),
                NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
                sizeInBytes, mAudioFormat,
                writeMode == WRITE_BLOCKING);
    }
    return ret;
}

All of these delegate to native-layer calls; following them down, the code is as follows:

// frameworks/base/core/jni/android_media_AudioTrack.cpp
static jint android_media_AudioTrack_write_byte(JNIEnv *env,  jobject thiz,
                                                  jbyteArray javaAudioData,
                                                  jint offsetInBytes, jint sizeInBytes,
                                                  jint javaAudioFormat,
                                                  jboolean isWriteBlocking) {
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    //...
    jint written = writeToTrack(lpTrack, javaAudioFormat, cAudioData, offsetInBytes, sizeInBytes,
            isWriteBlocking == JNI_TRUE /* blocking */);
    //...
    return written;
}
 
static jint android_media_AudioTrack_write_native_bytes(JNIEnv *env,  jobject thiz,
        jbyteArray javaBytes, jint byteOffset, jint sizeInBytes,
        jint javaAudioFormat, jboolean isWriteBlocking) {
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    //...
    jint written = writeToTrack(lpTrack, javaAudioFormat, bytes.get(), byteOffset,
            sizeInBytes, isWriteBlocking == JNI_TRUE /* blocking */);
    return written;
}
 
static jint android_media_AudioTrack_write_short(JNIEnv *env,  jobject thiz,
                                                  jshortArray javaAudioData,
                                                  jint offsetInShorts, jint sizeInShorts,
                                                  jint javaAudioFormat) {
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    //...
    jint written = writeToTrack(lpTrack, javaAudioFormat, (jbyte *)cAudioData,
                                offsetInShorts * sizeof(short), sizeInShorts * sizeof(short),
            true /*blocking write, legacy behavior*/);
    //...
    return written;
}
 
static jint android_media_AudioTrack_write_float(JNIEnv *env,  jobject thiz,
                                                  jfloatArray javaAudioData,
                                                  jint offsetInFloats, jint sizeInFloats,
                                                  jint javaAudioFormat,
                                                  jboolean isWriteBlocking) {
 
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    //...
    jint written = writeToTrack(lpTrack, javaAudioFormat, (jbyte *)cAudioData,
                                offsetInFloats * sizeof(float), sizeInFloats * sizeof(float),
                                isWriteBlocking == JNI_TRUE /* blocking */);
    //...
    return written;
}
 

All of these native functions ultimately call the writeToTrack function, implemented as follows:

// frameworks/base/core/jni/android_media_AudioTrack.cpp
template <typename T>
static jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const T *data,
                         jint offsetInSamples, jint sizeInSamples, bool blocking) {
    // give the data to the native AudioTrack object (the data starts at the offset)
    ssize_t written = 0;
    // regular write() or copy the data to the AudioTrack's shared memory?
    size_t sizeInBytes = sizeInSamples * sizeof(T);
    // The playback thread provides the shared memory: call the C++-layer track's write()
    if (track->sharedBuffer() == 0) {
        written = track->write(data + offsetInSamples, sizeInBytes, blocking);
        // for compatibility with earlier behavior of write(), return 0 in this case
        if (written == (ssize_t) WOULD_BLOCK) {
            written = 0;
        }
    } else { // the app side provides the shared memory: do a direct memcpy
        // writing to shared memory, check for capacity
        if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {
            sizeInBytes = track->sharedBuffer()->size();
        }
        // Copy the data into the shared memory
        memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);
        written = sizeInBytes;
    }
    if (written >= 0) {
        return written / sizeof(T);
    }
    return interpretWriteSizeError(written);
}

From here the C++-layer track's basic operations take over. In brief, this code does the following (the two paths are sketched side by side after the list):

  • If track->sharedBuffer() == 0, the shared memory is provided by the playback thread, so the C++-layer track's write method is called.
  • If track->sharedBuffer() != 0, the shared memory is provided by the app side, so the data is memcpy'd directly into track->sharedBuffer(). Note that in this case writeToTrack truncates to the shared buffer's capacity, so the caller must size the buffer appropriately at construction time.
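Putting the two paths side by side from a native client's point of view (a schematic sketch; streamTrack, staticTrack, iMem, pcm, and nBytes are illustrative names):

// Stream mode (TRANSFER_SYNC): the playback thread owns the FIFO; each
// write() copies into it and may block until space frees up.
streamTrack->start();
streamTrack->write(pcm, nBytes);        // the sharedBuffer() == 0 path

// Static mode (TRANSFER_SHARED): the app owns the buffer; fill it before
// starting -- there is no per-buffer write().
memcpy(iMem->pointer(), pcm, nBytes);   // the sharedBuffer() != 0 path
staticTrack->start();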