AudioTrack與AudioFlinger在兩個不同進程,他們之間要通過共享內存進行音頻的數據交換。
交換的實現通過環形緩衝區來實現,貌似沒有同步機制,從實驗結果來看,AudioTrack寫滿緩衝區後AudioFlinger就會去讀取。
數據交換的實現主要在AudioTrackShared.cpp中實現,包括AudioTrackClientProxy和AudioTrackServerProxy。
兩邊數據通過cblk的flag來進行數據的讀寫。
// Interface through which both sides (client and server) operate on the shared data
class Proxy : public RefBase {
...
public:
// Descriptor for one contiguous chunk handed out by obtainBuffer()
struct Buffer {
size_t mFrameCount; // number of frames available in this buffer
void* mRaw; // pointer to first frame
size_t mNonContig; // number of additional non-contiguous frames available
};
protected:
// Bookkeeping about the shared-memory control block and data area
audio_track_cblk_t* const mCblk; // the control block
void* const mBuffers; // starting address of buffers
const size_t mFrameCount; // not necessarily a power of 2
const size_t mFrameSize; // in bytes
const size_t mFrameCountP2; // mFrameCount rounded to power of 2, streaming mode
const bool mIsOut; // true for AudioTrack, false for AudioRecord
const bool mClientInServer; // true for OutputTrack, false for AudioTrack & AudioRecord
bool mIsShutdown; // latch set to true when shared memory corruption detected
size_t mUnreleased; // unreleased frames remaining from most recent obtainBuffer
};
客戶端的流程:獲取Buffer -> 填充數據 -> 釋放Buffer
// Buffer acquisition is implemented in obtainBuffer(); it also contains the logic for
// how to wait when acquisition fails.
// The cblk ring buffer is tracked by 'rear' and 'front'; logs show both only ever grow,
// and rear - front is the filled region. How this maps back onto the buffer memory
// still needs further study.
status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
struct timespec *elapsed)
{
// timespec initialization
// The possible timeout modes
enum {
TIMEOUT_ZERO, // requested == NULL || *requested == 0
TIMEOUT_INFINITE, // *requested == infinity
TIMEOUT_FINITE, // 0 < *requested < infinity
TIMEOUT_CONTINUE, // additional chances after TIMEOUT_FINITE
} timeout;
// An endless loop acquires the buffer; 'break' and 'goto end' implement the
// different timeout behaviors
for (;;) {
...(檢查cblk的flag)
// compute number of frames available to write (AudioTrack) or read (AudioRecord)
int32_t front;
int32_t rear;
if (mIsOut) {
...(這裏有一大段註釋,說android_atomic_acquire_load可能是無用的,但就是要加,就是要任性..)
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
rear = cblk->u.mStreaming.mRear;
} else {
rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
front = cblk->u.mStreaming.mFront;
}
// Filled portion of the ring: indices only ever advance (see releaseBuffer),
// so plain subtraction works even after wrap-around
ssize_t filled = rear - front;
// pipe should not be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
...(gg了)
}
// Frames usable by this side: free space for the writer (mIsOut),
// filled data for the reader
size_t avail = mIsOut ? mFrameCount - filled : filled;
if (avail > 0) {
// 'avail' may be non-contiguous, so return only the first contiguous chunk
// i.e. the buffer may look like this (* = data):
// __________**********__________
// 'avail' is the sum of the two blank regions, but only one side can be
// handed out here
...
// Buffer obtained — done
status = NO_ERROR;
break;
}
...(後面是avail等於0的情況,有可能是server那邊沒讀完,也有可能其他情況,根據不同的Timeout方式選擇等待或放棄)
}
end: ...(錯誤處理等)
}
// Filling the buffer needs no special API; a plain memcpy is enough.
// Copy the caller's data into the obtained chunk, then advance the source
// pointer and the byte counters.
memcpy(audioBuffer.i8, buffer, toWrite);
buffer = ((const char *) buffer) + toWrite;
userSize -= toWrite;
written += toWrite;
// Releasing the buffer is simple
void ClientProxy::releaseBuffer(Buffer* buffer)
{
...(參數檢查,避免釋放不合法的buffer
mUnreleased -= stepCount;
audio_track_cblk_t* cblk = mCblk;
// In effect only one index is advanced: 'rear' after writing (AudioTrack),
// 'front' after reading (AudioRecord); the release store publishes the data
// to the other side
if (mIsOut) {
int32_t rear = cblk->u.mStreaming.mRear;
android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
} else {
int32_t front = cblk->u.mStreaming.mFront;
android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
}
}
服務端也是差不多的流程:獲取Buffer -> 使用數據 -> 釋放Buffer
// Buffer acquisition is implemented in obtainBuffer(), much like on the client side
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
...(參數檢查,避免buffer爲空等)
if (mIsOut) {
int32_t flush = cblk->u.mStreaming.mFlush;
rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
front = cblk->u.mStreaming.mFront;
// A changed flush counter means the client requested a flush since our last look
if (flush != mFlush) {
// effectively obtain then release whatever is in the buffer
// NOTE(review): there is a large chunk of 'front'-correction code here whose
// purpose is unclear — TODO investigate against AOSP sources
}
} else {
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
rear = cblk->u.mStreaming.mRear;
}
// Compute how much the client has filled vs. how much space remains for it
size_t availToServer;
if (mIsOut) {
availToServer = filled;
mAvailToClient = mFrameCount - filled;
} else {
availToServer = mFrameCount - filled;
mAvailToClient = filled;
}
// 'availToServer' may be non-contiguous, so return only the first contiguous chunk
...(這裏跟客戶端一樣也有是去左右其中一段)
no_init:
...(錯誤處理)
}
// The server-side use of the data is more involved; mixing and resampling are the
// tricky parts.
// obtainBuffer is wrapped by getNextBuffer of Track (an inner class of AudioFlinger).
// DirectOutputThread uses it much like the client does — a straight copy,
// then advance the frame counter and destination pointer.
memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
frameCount -= buffer.frameCount;
curBuf += buffer.frameCount * mFrameSize;
// Most other consumers go through AudioMixer.
// For example resampling: 'hook' is a function pointer, selected per configuration
// to point at the appropriate processing routine.
t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
state->resampleTemp, aux);
// Mixing, for example:
t.bufferProvider->getNextBuffer(&b, outputPTS);
const int16_t *in = b.i16;
do {
// Read one stereo frame (two adjacent 16-bit samples) as a single 32-bit word
uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
in += 2;
// mulRL presumably applies the packed volume 'vrl' per channel, with a >>12
// fixed-point scale-back — TODO confirm against AudioMixer sources
int32_t l = mulRL(1, rl, vrl) >> 12;
int32_t r = mulRL(0, rl, vrl) >> 12;
// clamping...
l = clamp16(l);
r = clamp16(r);
// Pack right into the high 16 bits and left into the low 16 bits of the output word
*out++ = (r<<16) | (l & 0xFFFF);
} while (--outFrames);
// Releasing the buffer is likewise straightforward
void ServerProxy::releaseBuffer(Buffer* buffer)
{
...(參數檢查)
// Essentially the same as the client side, mirrored: the server advances 'front'
// after consuming playback data, 'rear' after producing record data
if (mIsOut) {
int32_t front = cblk->u.mStreaming.mFront;
android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
} else {
int32_t rear = cblk->u.mStreaming.mRear;
android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
}
// Wake up the client via a futex, but only if no wake is already pending
...(各種參數的計算)
if (!(old & CBLK_FUTEX_WAKE)) {
(void) syscall(__NR_futex, &cblk->mFutex,
mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
}
...(清空buffer)
}
這裏僅僅是數據的交換流程,具體控制在Track裏,Track的各種狀態都會影響該流程。