Audio Notes: AudioTrack

        Sound can be played back with either MediaPlayer or AudioTrack; both expose Java APIs to application developers. Although both play audio, they differ significantly. The biggest difference is that MediaPlayer can play sound files in many formats, such as MP3, AAC, WAV, OGG and MIDI, and creates the matching audio decoder in the framework layer. AudioTrack, by contrast, only plays PCM streams that have already been decoded; the only file format it accepts is WAV, because WAV files mostly carry raw PCM. AudioTrack creates no decoder, so it can only play WAV files that need no decoding. The two are nonetheless closely related: in the framework layer, MediaPlayer still creates an AudioTrack and hands it the decoded PCM stream, and AudioTrack passes the data on to AudioFlinger for mixing before it reaches the hardware. In that sense, MediaPlayer contains an AudioTrack. An example of playing music with AudioTrack:

/*cts/tests/tests/media/src/android/media/cts*/

    public void testSetStereoVolumeMax() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMax";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------

        /* Step 1. */
        // Compute the buffer size that must be allocated for the AudioTrack,
        // derived from the HAL buffer size; these buffers are later handed to
        // the track's mCblk for data transfer.
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);

        /* Step 2. */
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF,
                TEST_FORMAT, 2 * minBuffSize, TEST_MODE);

        byte data[] = new byte[minBuffSize];

        // -------- test --------------

        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();

        float maxVol = AudioTrack.getMaxVolume();
        assertTrue(TEST_NAME, track.setStereoVolume(maxVol, maxVol) == AudioTrack.SUCCESS);

        // -------- tear down --------------

        track.release();
    }
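For contrast with the CTS test above, here is a minimal sketch of streaming the PCM payload of a WAV file through MODE_STREAM. It assumes a canonical 44-byte PCM WAV header, and the sample rate and channel mask are hard-coded illustrative values rather than parsed from the file:

import java.io.FileInputStream;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class WavStreamer {
    // Illustrative parameter; a real player would read it from the WAV header.
    private static final int SAMPLE_RATE = 44100;

    public static void play(String path) throws Exception {
        int minBuffSize = AudioTrack.getMinBufferSize(SAMPLE_RATE,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, SAMPLE_RATE,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
                2 * minBuffSize, AudioTrack.MODE_STREAM);
        FileInputStream in = new FileInputStream(path);
        try {
            in.skip(44);                    // skip a canonical PCM WAV header
            byte[] buffer = new byte[minBuffSize];
            track.play();
            int n;
            while ((n = in.read(buffer)) > 0) {
                track.write(buffer, 0, n);  // blocks until the PCM is queued
            }
        } finally {
            track.stop();
            track.release();
            in.close();
        }
    }
}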

  // This constructor leaves the session ID at the default of 0
   public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, 0 /*session*/);
    }

public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
       // Remember the Looper of the thread that created this AudioTrack,
       // so messages can be posted to it later
        mInitializationLooper = looper;
/**
     * Parameter checks:
     * 1. Verify streamType is valid and assign it to mStreamType.
     * 2. Verify sampleRateInHz is between 4000 and 48000 and assign it to mSampleRate.
     * 3. Set mChannels, normalizing the channel config to CHANNEL_OUT_MONO
     *    or CHANNEL_OUT_STEREO.
     * 4. Set mAudioFormat:
     *      ENCODING_PCM_16BIT, ENCODING_DEFAULT ---> ENCODING_PCM_16BIT
     *      ENCODING_PCM_8BIT ---> ENCODING_PCM_8BIT
     * 5. Set mDataLoadMode: MODE_STREAM or MODE_STATIC.
     */
        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);

    /* Compute the frame size in bytes from the sample precision, then the
     * buffer frame count = buffer size / frame size; the buffer size must
     * be a whole multiple of the frame size.
     */
        audioBuffSizeCheck(bufferSizeInBytes);

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this),
                mStreamType, mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }
       // Update with the session ID returned by the native layer
        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
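Since native_setup can fail and leave the object in STATE_UNINITIALIZED, callers are expected to check the state before using a track. A minimal sketch of that check follows; the helper name createTrackOrThrow is hypothetical:

    // Hypothetical helper showing the post-construction state check.
    static AudioTrack createTrackOrThrow(int sampleRate, int channelConfig, int format) {
        int minBuffSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, format);
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                channelConfig, format, 2 * minBuffSize, AudioTrack.MODE_STREAM);
        // A MODE_STREAM track reports STATE_INITIALIZED right away; a MODE_STATIC
        // track stays in STATE_NO_STATIC_DATA until write() has supplied its data.
        if (track.getState() == AudioTrack.STATE_UNINITIALIZED) {
            track.release();
            throw new IllegalStateException("AudioTrack initialization failed");
        }
        return track;
    }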

static jint
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jint streamType, jint sampleRateInHertz, jint javaChannelMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession)
{
    ALOGV("sampleRate=%d, audioFormat(from Java)=%d, channel mask=%x, buffSize=%d",
        sampleRateInHertz, audioFormat, javaChannelMask, buffSizeInBytes);
    uint32_t afSampleRate;
    size_t afFrameCount;
    // Query the frame count for this stream type from AudioPolicyService via AudioSystem.
    // Note that the getOutputXxxx() helpers are actually quite involved; taking
    // getOutputFrameCount() as an example, the steps are:
    // 1. map streamType to a strategy,
    // 2. map the strategy to a device,
    // 3. map the device to an output,
    // 4. return that output's frame count (the number of frames in the hardware buffer).
    if (AudioSystem::getOutputFrameCount(&afFrameCount, (audio_stream_type_t) streamType) != NO_ERROR) {
        ALOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
        return (jint) AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, (audio_stream_type_t) streamType) != NO_ERROR) {
        ALOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
        return (jint) AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }

    // Java channel masks don't map directly to the native definition, but it's a simple shift
    // to skip the two deprecated channel configurations "default" and "mono".
    // The native layer has no "default" and "mono" channel definitions, hence the conversion.
    uint32_t nativeChannelMask = ((uint32_t)javaChannelMask) >> 2;
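    // Illustration: Java CHANNEL_OUT_STEREO is FRONT_LEFT (0x4) | FRONT_RIGHT (0x8) = 0xC;
    // shifting right by 2 gives 0x3, the native AUDIO_CHANNEL_OUT_STEREO.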

    if (!audio_is_output_channel(nativeChannelMask)) {
        ALOGE("Error creating AudioTrack: invalid channel mask %#x.", javaChannelMask);
        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
    }
    // Count the bits set to 1; each bit represents one channel.
    int nbChannels = popcount(nativeChannelMask);

    // check the stream type
    audio_stream_type_t atStreamType;
    switch (streamType) {
    case AUDIO_STREAM_VOICE_CALL:
    case AUDIO_STREAM_SYSTEM:
    case AUDIO_STREAM_RING:
    case AUDIO_STREAM_MUSIC:
    case AUDIO_STREAM_ALARM:
    case AUDIO_STREAM_NOTIFICATION:
    case AUDIO_STREAM_BLUETOOTH_SCO:
    case AUDIO_STREAM_DTMF:
        atStreamType = (audio_stream_type_t) streamType;
        break;
    default:
        ALOGE("Error creating AudioTrack: unknown stream type.");
        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
    }

    // check the format.
    // This function was called from Java, so we compare the format against the Java constants
    if ((audioFormat != ENCODING_PCM_16BIT) && (audioFormat != ENCODING_PCM_8BIT)) {
        ALOGE("Error creating AudioTrack: unsupported audio format.");
        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
    }

    // for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class
    // so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled
    // in android_media_AudioTrack_native_write_byte()
    // Widen 8-bit to 16-bit by declaring the format as 16-bit and doubling the
    // buffer, because in static mode the native layer only supports 16-bit;
    // the write path performs the matching 8->16-bit conversion later.
    if ((audioFormat == ENCODING_PCM_8BIT)
        && (memoryMode == MODE_STATIC)) {
        ALOGV("android_media_AudioTrack_native_setup(): requesting MODE_STATIC for 8bit \
            buff size of %dbytes, switching to 16bit, buff size of %dbytes",
            buffSizeInBytes, 2*buffSizeInBytes);
        audioFormat = ENCODING_PCM_16BIT;
        // we will need twice the memory to store the data
        buffSizeInBytes *= 2;
    }

    // Get the bytes per sample from the sample format, then compute the buffer frame count.
    int bytesPerSample = audioFormat == ENCODING_PCM_16BIT ? 2 : 1;
    audio_format_t format = audioFormat == ENCODING_PCM_16BIT ?
            AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_8_BIT;
    int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
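    // Illustration (example values): for stereo 16-bit PCM with buffSizeInBytes = 8192,
    // frameCount = 8192 / (2 channels * 2 bytes per sample) = 2048 frames.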

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIOTRACK_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIOTRACK_ERROR;
    }
    int sessionId = nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    // create the native AudioTrack object
    sp<AudioTrack> lpTrack = new AudioTrack();

    // initialize the callback information:
    // this data will be passed with every AudioTrack callback
    AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
    lpJniStorage->mStreamType = atStreamType;
    lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
    // we use a weak reference so the AudioTrack object can be garbage collected.
    lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
    lpJniStorage->mCallbackData.busy = false;

    // initialize the native AudioTrack object
    switch (memoryMode) {
    case MODE_STREAM:
        // STREAM mode: no shared memory is allocated here; the Track object later
        // obtains heap space for mCblk through AudioFlinger's Client object to
        // carry the data transfer.
        lpTrack->set(
            atStreamType,// stream type
            sampleRateInHertz,
            format,// word length, PCM
            nativeChannelMask,
            frameCount,
            AUDIO_OUTPUT_FLAG_NONE,
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            0,// shared mem
            true,// thread can call Java
            sessionId);// audio session ID
        break;

    case MODE_STATIC:
        // STATIC mode: allocate a shared memory region of buffSizeInBytes for the
        // AudioTrack; these buffers are later handed to the track's mCblk for data transfer.
        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
            ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
            goto native_init_failure;
        }

        lpTrack->set(
            atStreamType,// stream type
            sampleRateInHertz,
            format,// word length, PCM
            nativeChannelMask,
            frameCount,
            AUDIO_OUTPUT_FLAG_NONE,
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            lpJniStorage->mMemBase,// shared mem
            true,// thread can call Java
            sessionId);// audio session ID
        break;

    default:
        ALOGE("Unknown mode %d", memoryMode);
        goto native_init_failure;
    }

    if (lpTrack->initCheck() != NO_ERROR) {
        ALOGE("Error initializing AudioTrack");
        goto native_init_failure;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioTrack in case we create a new session
    // Refresh the session ID from the native AudioTrack.
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {   // scope for the lock
        Mutex::Autolock l(sLock);
        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
    }
    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
    // of the Java object (in mNativeTrackInJavaObj)
    setAudioTrack(env, thiz, lpTrack);

    // save the JNI resources so we can free them later
    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);

    return (jint) AUDIOTRACK_SUCCESS;

    // failures:
native_init_failure:
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
    delete lpJniStorage;
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}

static void audioCallback(int event, void* user, void *info) {

    audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
    {
        Mutex::Autolock l(sLock);
        if (sAudioTrackCallBackCookies.indexOf(callbackInfo) < 0) {
            return;
        }
        callbackInfo->busy = true;
    }

    switch (event) {
    case AudioTrack::EVENT_MARKER: {
        JNIEnv *env = AndroidRuntime::getJNIEnv();
        if (user != NULL && env != NULL) {
            env->CallStaticVoidMethod(
                callbackInfo->audioTrack_class,
                javaAudioTrackFields.postNativeEventInJava,
                callbackInfo->audioTrack_ref, event, 0,0, NULL);
            if (env->ExceptionCheck()) {
                env->ExceptionDescribe();
                env->ExceptionClear();
            }
        }
        } break;

    case AudioTrack::EVENT_NEW_POS: {
        JNIEnv *env = AndroidRuntime::getJNIEnv();
        if (user != NULL && env != NULL) {
            env->CallStaticVoidMethod(
                callbackInfo->audioTrack_class,
                javaAudioTrackFields.postNativeEventInJava,
                callbackInfo->audioTrack_ref, event, 0,0, NULL);
            if (env->ExceptionCheck()) {
                env->ExceptionDescribe();
                env->ExceptionClear();
            }
        }
        } break;
    }

    {
        Mutex::Autolock l(sLock);
        callbackInfo->busy = false;
        callbackInfo->cond.broadcast();
    }
}
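On the Java side, EVENT_MARKER and EVENT_NEW_POS end up in an application's OnPlaybackPositionUpdateListener once the JNI callback above posts them to the Java layer. A minimal sketch of subscribing to them; the frame positions are illustrative, assuming the 22.05 kHz track from the test:

    static void watchPlaybackPosition(AudioTrack track) {
        track.setNotificationMarkerPosition(22050);  // fires EVENT_MARKER after 1 s of frames
        track.setPositionNotificationPeriod(11025);  // fires EVENT_NEW_POS every 0.5 s
        track.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
            @Override
            public void onMarkerReached(AudioTrack t) {
                // delivered from AudioTrack::EVENT_MARKER via the JNI callback
            }
            @Override
            public void onPeriodicNotification(AudioTrack t) {
                // delivered from AudioTrack::EVENT_NEW_POS via the JNI callback
            }
        });
    }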

            status_t    set(audio_stream_type_t streamType,
                            uint32_t sampleRate,
                            audio_format_t format,
                            audio_channel_mask_t channelMask,
                            int frameCount      = 0,
                            audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                            callback_t cbf      = NULL,
                            void* user          = NULL,
                            int notificationFrames = 0,
                            const sp<IMemory>& sharedBuffer = 0,
                            bool threadCanCallJava = false,
                            int sessionId       = 0,
                            transfer_type transferType = TRANSFER_DEFAULT,
                            const audio_offload_info_t *offloadInfo = NULL,
                            int uid = -1);

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid)
{
    // Choose the audio data transfer mode from transferType, the shared buffer,
    // the callback and threadCanCallJava. There are four transfer types in all,
    // though only CALLBACK and SHARED are commonly used.
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
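    // For tracks created through the JNI path above, cbf is audioCallback and
    // threadCanCallJava is true, so TRANSFER_DEFAULT resolves to TRANSFER_SYNC
    // for MODE_STREAM and to TRANSFER_SHARED for MODE_STATIC.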
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    mOutput = 0;

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    if (sampleRate == 0) {
        uint32_t afSampleRate;
        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
            return NO_INIT;
        }
        sampleRate = afSampleRate;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    if (channelMask == 0) {
        channelMask = AUDIO_CHANNEL_OUT_STEREO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }

    // AudioFlinger does not currently support 8-bit data in shared memory
    // In STATIC mode 8-bit precision is unsupported; the JNI layer has already converted it to 16-bit.
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    // If offload was requested, or the data is not linear PCM, force the direct flag:
    // non-PCM data is output directly and parsed by the hardware.
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (streamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * sizeof(int16_t);
    } else {
        mFrameSize = sizeof(uint8_t);
        mFrameSizeAF = sizeof(uint8_t);
    }
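    // Illustration: for stereo 16-bit PCM, mFrameSize = 2 channels * 2 bytes = 4 bytes,
    // and mFrameSizeAF (the frame size as AudioFlinger sees it) is also 4 bytes.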

    audio_io_handle_t output = AudioSystem::getOutput(
                                    streamType,
                                    sampleRate, format, channelMask,
                                    flags,
                                    offloadInfo);

    if (output == 0) {
        ALOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    mFrameCount = frameCount;
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;
    /* cbf is the callback installed by the JNI layer.
     * Since cbf (audioCallback) is not NULL, an AudioTrackThread is started below.
     * This thread carries data and event notifications between the native
     * AudioTrack and the Java AudioTrack, giving the upper layers an entry
     * point for handling:
     *   EVENT_MORE_DATA  = 0  request to write more data
     *   EVENT_UNDERRUN   = 1  the PCM buffer underran
     *   EVENT_LOOP_END   = 2  loop end reached; if the loop count != 0,
     *                         playback restarts from the loop start
     *   EVENT_MARKER     = 3  playback head is at the given position, see setMarkerPosition
     *   EVENT_NEW_POS    = 4  playback head is at a new position, see setPositionUpdatePeriod
     *   EVENT_BUFFER_END = 5  playback head is at the end of the buffer
     */
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status_t status = createTrack_l(streamType,
                                  sampleRate,
                                  format,
                                  frameCount,
                                  flags,
                                  sharedBuffer,
                                  output,
                                  0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        //Use of direct and offloaded output streams is ref counted by audio policy manager.
        // As getOutput was called above and resulted in an output stream to be opened,
        // we need to release it.
        AudioSystem::releaseOutput(output);
        return status;
    }

    mStatus = NO_ERROR;
    mStreamType = streamType;
    mFormat = format;
    mSharedBuffer = sharedBuffer;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mOutput = output;

    return NO_ERROR;
}

// must be called with mLock held
status_t AudioTrack::createTrack_l(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        size_t frameCount,
        audio_output_flags_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Not all of these values are needed under all conditions, but it is easier to get them all
    // Read the various HAL-layer parameters.
    uint32_t afLatency;
    status = AudioSystem::getLatency(output, streamType, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        return NO_INIT;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
            // either of these use cases:
            // use case 1: shared buffer
            (sharedBuffer != 0) ||
            // use case 2: callback handler
            (mCbf != NULL))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
        mFlags = flags;
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && sampleRate != afSampleRate) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client due to mismatching sample rate (%d vs %d)",
              sampleRate, afSampleRate);
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    // Size the client buffer from the hardware sample rate: at least twice the hardware buffer.
    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
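    // Illustration: the 22050 Hz track from the test above on 44100 Hz hardware needs
    // sample rate conversion, so nBuffering = 3; with matching rates it would be 2.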

    mNotificationFramesAct = mNotificationFramesReq;
     // frameCount must be (re)computed in the following cases:
     // 1. Non-PCM data: in STATIC mode derive the frame count from the shared
     //    buffer; otherwise fall back to the HAL frame count.
     // 2. PCM data in STATIC mode: check the shared buffer's alignment, then
     //    derive the frame count from its size.
     // 3. PCM data in STREAM mode on a non-FAST track: compute the minimum
     //    buffer from the hardware latency and, if it exceeds the requested
     //    buffer, grow the frame count to the minimum.
     // 4. FAST tracks: the server computes and validates the frame count.
    if (!audio_is_linear_pcm(format)) {

        if (sharedBuffer != 0) { // static mode
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = sharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (sharedBuffer != 0) { // static mode

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        // buffer alignment; the rationale is not explained here
        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    sharedBuffer->pointer(), mChannelCount);
            return BAD_VALUE;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        // because the AudioTrack constructors did not pass in a frameCount,
        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
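        // Illustration: an 8192-byte shared buffer with 2 channels gives
        // frameCount = 8192 / 2 / sizeof(int16_t) = 2048 frames.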

    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        // From the hardware latency, compute the minimum number of hardware-buffer-sized blocks needed.
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }
        // Equivalent to minFrameCount / sampleRate = (afFrameCount / afSampleRate) * minBufCount;
        // substituting minBufCount gives minFrameCount = (afLatency * sampleRate) / 1000.
        // When the hardware has latency, enough client-side buffering is needed
        // to avoid losing data; ideally the client buffer matches the hardware buffer size.
        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
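        // Illustration (example values): afLatency = 96 ms, afFrameCount = 1024,
        // afSampleRate = 44100 -> one hardware buffer lasts (1000*1024)/44100 = 23 ms,
        // so minBufCount = 96/23 = 4; for a 22050 Hz track this gives
        // minFrameCount = (1024 * 22050 * 4) / 44100 = 2048 frames.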
        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %d to %d",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }
    // Ask AudioFlinger to construct a Track object and return a TrackHandle binder; analyzed further later.
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      sampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
                                                              AUDIO_FORMAT_PCM_16_BIT : format,
                                                      mChannelMask,
                                                      frameCount,
                                                      &trackFlags,
                                                      sharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mName,
                                                      mClientUid,
                                                      &status);

    if (track == 0) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    // Save the track object created on the AudioFlinger side (actually a
    // TrackHandle) and the memory managed by its control block (cblk).
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
    mCblk = cblk;
    size_t temp = cblk->frameCount_;

   // Compare the requested frame count (frameCount) with the frame count actually
   // allocated (temp), then adopt the actual value as the requested one.
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;
    mAwaitBoost = false;
    // flags asked for a fast track
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        // Flag returned by AudioFlinger after track creation: if this bit is set
        // the fast track was granted, otherwise it was denied.
        if (trackFlags & IAudioFlinger::TRACK_FAST) {  
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);  
            mAwaitBoost = true;  
            if (sharedBuffer == 0) {  
                // Theoretically double-buffering is not required for fast tracks,  
                // due to tighter scheduling.  But in practice, to accommodate kernels with  
                // scheduling jitter, and apps with computation jitter, we use double-buffering.  
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {  
                    mNotificationFramesAct = frameCount/nBuffering;  
                }  
            }  
        } else {  
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);  
            // once denied, do not request again if IAudioTrack is re-created  
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);  
            mFlags = flags;  
            if (sharedBuffer == 0) {  
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {  
                    mNotificationFramesAct = frameCount/nBuffering;  
                }  
            }  
        }  
    }  
    // flags asked for an offloaded track
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {  
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {  
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");  
        } else {  
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");  
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);  
            mFlags = flags;  
            return NO_INIT;  
        }  
    }  
 
    mRefreshRemaining = true;  
 
    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers  
    // is the value of pointer() for the shared buffer, otherwise buffers points  
    // immediately after the control block.  This address is for the mapping within client  
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    // In STREAM mode the buffers start right after the control block; in STATIC
    // mode they start at the shared buffer's base address.
    void* buffers;  
    if (sharedBuffer == 0) {  
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);  
    } else {  
        buffers = sharedBuffer->pointer();  
    }  
    // Ask the AudioFlinger side to attach this track to the effect identified by
    // mAuxEffectId; since mAuxEffectId == 0 this in effect sets the track's
    // mAuxBuffer to NULL, i.e. initializes it. The details are analyzed later.
    mAudioTrack->attachAuxEffect(mAuxEffectId);

    // FIXME don't believe this lie  
    mLatency = afLatency + (1000*frameCount) / sampleRate;  
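    // Illustration (example values): afLatency = 96 ms, frameCount = 2048,
    // sampleRate = 22050 -> mLatency = 96 + (1000*2048)/22050 = 96 + 92 = 188 ms.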
    mFrameCount = frameCount;  
    // If IAudioTrack is re-created, don't let the requested frameCount  
    // decrease.  This can confuse clients that cache frameCount().  
    if (frameCount > mReqFrameCount) {  
        mReqFrameCount = frameCount;  
    }  
 
    // update proxy
    // Build the client-side proxy according to STREAM vs STATIC mode, saving the
    // control block, the base address of the data buffers, the allocated frame
    // count and the AudioFlinger-side frame size.
    if (sharedBuffer == 0) {  
        mStaticProxy.clear();  
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);  
    } else {  
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);  
        mProxy = mStaticProxy;  
    }  
    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |  
            uint16_t(mVolume[LEFT] * 0x1000));  
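    // Illustration: the gains are 4.12 fixed point, left in the low 16 bits and
    // right in the high 16 bits; with both channels at 1.0f the packed value is 0x10001000.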
    mProxy->setSendLevel(mSendLevel);  
    mProxy->setSampleRate(mSampleRate);  
    mProxy->setEpoch(epoch);  
    mProxy->setMinimum(mNotificationFramesAct);  
 
    mDeathNotifier = new DeathNotifier(this);  
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);  
 
    return NO_ERROR;  
}

