A Preliminary Look at How CameraService Pushes Frame Data to the Screen via Surface

This article takes a first look at how CameraService hands frame data to a Surface so that it finally shows up on screen.
The entry point through which CameraService feeds frame data into a Surface is the Surface's queueBuffer.
Once preview has started, CameraService keeps receiving IPC data from CameraProvider (frame data coming up from the HAL layer);
the corresponding entry function is

//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// implementation of hardware::camera::device::V3_2::ICameraDeviceCallback
hardware::Return<void> Camera3Device::processCaptureResult(
        const hardware::hidl_vec<
                hardware::camera::device::V3_2::CaptureResult>& results) {
    .....
    // continue delivering the result data
    for (const auto& result : results) {
        processOneCaptureResultLocked(result);
    }
    mProcessCaptureResultLock.unlock();
    return hardware::Void();
}
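
For context, the reason this callback fires at all is that Camera3Device registers itself as the HIDL ICameraDeviceCallback when the HAL session is opened. A rough sketch of that registration (abridged from Camera3Device::initialize(); treat the exact signature as approximate):

//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp (abridged sketch)
sp<hardware::camera::device::V3_2::ICameraDeviceSession> session;
// 'this' is passed as the ICameraDeviceCallback, which is why the HAL's
// capture results arrive in Camera3Device::processCaptureResult() above
status_t res = manager->openSession(mId.string(), this, /*out*/ &session);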

Next, let's look at processOneCaptureResultLocked:

//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
void Camera3Device::processOneCaptureResultLocked(
        const hardware::camera::device::V3_2::CaptureResult& result) {
     // first, convert the hardware::camera::device::V3_2::CaptureResult result into a camera3_capture_result r
    camera3_capture_result r;
    status_t res;
    r.frame_number = result.frameNumber;

    hardware::camera::device::V3_2::CameraMetadata resultMetadata;
    // result.fmqResultSize is 0 in this scenario (the metadata arrives inline)
    if (result.fmqResultSize > 0) {
        resultMetadata.resize(result.fmqResultSize);
        if (mResultMetadataQueue == nullptr) {
            return; // logged in initialize()
        }
        if (!mResultMetadataQueue->read(resultMetadata.data(), result.fmqResultSize)) {
            ALOGE("%s: Frame %d: Cannot read camera metadata from fmq, size = %" PRIu64,
                    __FUNCTION__, result.frameNumber, result.fmqResultSize);
            return;
        }
    } else {
        // convert result.result into the hardware::camera::device::V3_2::CameraMetadata resultMetadata
        resultMetadata.setToExternal(const_cast<uint8_t *>(result.result.data()),
                result.result.size());
    }
    // convert resultMetadata into camera_metadata_t and store it in r.result
    if (resultMetadata.size() != 0) {
        r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
        size_t expected_metadata_size = resultMetadata.size();
        if ((res = validate_camera_metadata_structure(r.result, &expected_metadata_size)) != OK) {
            ALOGE("%s: Frame %d: Invalid camera metadata received by camera service from HAL: %s (%d)",
                    __FUNCTION__, result.frameNumber, strerror(-res), res);
            return;
        }
    } else {
        r.result = nullptr;
    }
    // convert the outputBuffers
    std::vector<camera3_stream_buffer_t> outputBuffers(result.outputBuffers.size());
    std::vector<buffer_handle_t> outputBufferHandles(result.outputBuffers.size());
    for (size_t i = 0; i < result.outputBuffers.size(); i++) {
        auto& bDst = outputBuffers[i];
        const StreamBuffer &bSrc = result.outputBuffers[i];
        // check whether the buffer belongs to one of the existing mOutputStreams;
        // if so, idx is that streamId's index within mOutputStreams
        ssize_t idx = mOutputStreams.indexOfKey(bSrc.streamId);
        if (idx == NAME_NOT_FOUND) {
            ALOGE("%s: Frame %d: Buffer %zu: Invalid output stream id %d",
                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
            return;
        }
        bDst.stream = mOutputStreams.valueAt(idx)->asHalStream();

        buffer_handle_t *buffer;
        // fetch the buffer matching result.frameNumber and bSrc.streamId
        res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
        if (res != OK) {
            ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
                    __FUNCTION__, result.frameNumber, i, bSrc.streamId);
            return;
        }
        // copy the fetched buffer info into bDst
        bDst.buffer = buffer;
        bDst.status = mapHidlBufferStatus(bSrc.status);
        bDst.acquire_fence = -1;
        if (bSrc.releaseFence == nullptr) {
            bDst.release_fence = -1;
        } else if (bSrc.releaseFence->numFds == 1) {
            bDst.release_fence = dup(bSrc.releaseFence->data[0]);
        } else {
            ALOGE("%s: Frame %d: Invalid release fence for buffer %zu, fd count is %d, not 1",
                    __FUNCTION__, result.frameNumber, i, bSrc.releaseFence->numFds);
            return;
        }
    }
    r.num_output_buffers = outputBuffers.size();
    r.output_buffers = outputBuffers.data();
    // convert the inputBuffer; normally result.inputBuffer.streamId is -1
    camera3_stream_buffer_t inputBuffer;
    if (result.inputBuffer.streamId == -1) {
        r.input_buffer = nullptr;
    } else {
        if (mInputStream->getId() != result.inputBuffer.streamId) {
            ALOGE("%s: Frame %d: Invalid input stream id %d", __FUNCTION__,
                    result.frameNumber, result.inputBuffer.streamId);
            return;
        }
        inputBuffer.stream = mInputStream->asHalStream();
        buffer_handle_t *buffer;
        res = mInterface->popInflightBuffer(result.frameNumber, result.inputBuffer.streamId,
                &buffer);
        if (res != OK) {
            ALOGE("%s: Frame %d: Input buffer: No in-flight buffer for stream %d",
                    __FUNCTION__, result.frameNumber, result.inputBuffer.streamId);
            return;
        }
        inputBuffer.buffer = buffer;
        inputBuffer.status = mapHidlBufferStatus(result.inputBuffer.status);
        inputBuffer.acquire_fence = -1;
        if (result.inputBuffer.releaseFence == nullptr) {
            inputBuffer.release_fence = -1;
        } else if (result.inputBuffer.releaseFence->numFds == 1) {
            inputBuffer.release_fence = dup(result.inputBuffer.releaseFence->data[0]);
        } else {
            ALOGE("%s: Frame %d: Invalid release fence for input buffer, fd count is %d, not 1",
                    __FUNCTION__, result.frameNumber, result.inputBuffer.releaseFence->numFds);
            return;
        }
        r.input_buffer = &inputBuffer;
    }

    r.partial_result = result.partialResult;
    // continue delivering the converted camera3_capture_result frame data
    processCaptureResult(&r);
}

Next comes processCaptureResult(const camera3_capture_result *result):

//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
void Camera3Device::processCaptureResult(const camera3_capture_result *result) {
    ATRACE_CALL();

    status_t res;

    uint32_t frameNumber = result->frame_number;
    if (result->result == NULL && result->num_output_buffers == 0 &&
            result->input_buffer == NULL) {
        SET_ERR("No result data provided by HAL for frame %d",
                frameNumber);
        return;
    }

    if (!mUsePartialResult &&
            result->result != NULL &&
            result->partial_result != 1) {
        SET_ERR("Result is malformed for frame %d: partial_result %u must be 1"
                " if partial result is not supported",
                frameNumber, result->partial_result);
        return;
    }

    bool isPartialResult = false;
    CameraMetadata collectedPartialResult;
    CaptureResultExtras resultExtras;
    bool hasInputBufferInRequest = false;

    // Get shutter timestamp and resultExtras from list of in-flight requests,
    // where it was added by the shutter notification for this frame. If the
    // shutter timestamp isn't received yet, append the output buffers to the
    // in-flight request and they will be returned when the shutter timestamp
    // arrives. Update the in-flight status and remove the in-flight entry if
    // all result data and shutter timestamp have been received.
    nsecs_t shutterTimestamp = 0;

    {
        Mutex::Autolock l(mInFlightLock);
        // look up the request matching frameNumber
        ssize_t idx = mInFlightMap.indexOfKey(frameNumber);
        if (idx == NAME_NOT_FOUND) {
            SET_ERR("Unknown frame number for capture result: %d",
                    frameNumber);
            return;
        }
        InFlightRequest &request = mInFlightMap.editValueAt(idx);
        ALOGVV("%s: got InFlightRequest requestId = %" PRId32
                ", frameNumber = %" PRId64 ", burstId = %" PRId32
                ", partialResultCount = %d, hasCallback = %d",
                __FUNCTION__, request.resultExtras.requestId,
                request.resultExtras.frameNumber, request.resultExtras.burstId,
                result->partial_result, request.hasCallback);
        // Always update the partial count to the latest one if it's not 0
        // (buffers only). When framework aggregates adjacent partial results
        // into one, the latest partial count will be used.
        if (result->partial_result != 0)
            request.resultExtras.partialResultCount = result->partial_result;

        // Check if this result carries only partial metadata
        // mUsePartialResult is false in this scenario
        if (mUsePartialResult && result->result != NULL) {
            if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
                SET_ERR("Result is malformed for frame %d: partial_result %u must be  in"
                        " the range of [1, %d] when metadata is included in the result",
                        frameNumber, result->partial_result, mNumPartialResults);
                return;
            }
            isPartialResult = (result->partial_result < mNumPartialResults);
            if (isPartialResult) {
                request.collectedPartialResult.append(result->result);
            }

            if (isPartialResult && request.hasCallback) {
                // Send partial capture result
                sendPartialCaptureResult(result->result, request.resultExtras,
                        frameNumber);
            }
        }
        // fetch the request's shutter timestamp
        shutterTimestamp = request.shutterTimestamp;
        // check whether the request has an input buffer
        hasInputBufferInRequest = request.hasInputBuffer;

        // Did we get the (final) result metadata for this capture?
        // isPartialResult is false here (mUsePartialResult is false), so this branch runs
        if (result->result != NULL && !isPartialResult) {
            if (request.haveResultMetadata) {
                SET_ERR("Called multiple times with metadata for frame %d",
                        frameNumber);
                return;
            }
            if (mUsePartialResult &&
                    !request.collectedPartialResult.isEmpty()) {
                collectedPartialResult.acquire(
                    request.collectedPartialResult);
            }
            request.haveResultMetadata = true;
        }

        uint32_t numBuffersReturned = result->num_output_buffers;
        // result->input_buffer is null in this scenario
        if (result->input_buffer != NULL) {
            if (hasInputBufferInRequest) {
                numBuffersReturned += 1;
            } else {
                ALOGW("%s: Input buffer should be NULL if there is no input"
                        " buffer sent in the request",
                        __FUNCTION__);
            }
        }
        request.numBuffersLeft -= numBuffersReturned;
        if (request.numBuffersLeft < 0) {
            SET_ERR("Too many buffers returned for frame %d",
                    frameNumber);
            return;
        }

        camera_metadata_ro_entry_t entry;
        res = find_camera_metadata_ro_entry(result->result,
                ANDROID_SENSOR_TIMESTAMP, &entry);
        if (res == OK && entry.count == 1) {
            request.sensorTimestamp = entry.data.i64[0];
        }

        // If shutter event isn't received yet, append the output buffers to
        // the in-flight request. Otherwise, return the output buffers to
        // streams.
        if (shutterTimestamp == 0) {
            request.pendingOutputBuffers.appendArray(result->output_buffers,
                result->num_output_buffers);
        } else {
           // continue delivering the frame data
            returnOutputBuffers(result->output_buffers,
                result->num_output_buffers, shutterTimestamp);
        }
        // isPartialResult is false here, so the final metadata is handled
        if (result->result != NULL && !isPartialResult) {
            if (shutterTimestamp == 0) {
                request.pendingMetadata = result->result;
                request.collectedPartialResult = collectedPartialResult;
            } else if (request.hasCallback) {
                CameraMetadata metadata;
                metadata = result->result;
                sendCaptureResult(metadata, request.resultExtras,
                    collectedPartialResult, frameNumber,
                    hasInputBufferInRequest);
            }
        }
       // remove the request at idx from mInFlightMap once it is complete
        removeInFlightRequestIfReadyLocked(idx);
    } // scope for mInFlightLock
    // check whether there is an input_buffer;
    // it is null here, so we skip this part
    if (result->input_buffer != NULL) {
       ....
    }
}

Continuing with returnOutputBuffers:

void Camera3Device::returnOutputBuffers(
        const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
        nsecs_t timestamp) {
     // iterate over all outputBuffers, find the Camera3Stream each belongs to, and call its returnBuffer
    for (size_t i = 0; i < numBuffers; i++)
    {
        // find the corresponding Camera3Stream;
        // an output stream maps to a Camera3OutputStream
        Camera3Stream *stream = Camera3Stream::cast(outputBuffers[i].stream);
        // then call its returnBuffer
        status_t res = stream->returnBuffer(outputBuffers[i], timestamp);
        // Note: stream may be deallocated at this point, if this buffer was
        // the last reference to it.
        if (res != OK) {
            ALOGE("Can't return buffer to its stream: %s (%d)",
                strerror(-res), res);
        }
    }
}
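
Camera3Stream::cast works because Camera3Stream derives from the plain C struct camera3_stream that the HAL sees, so the pointer stored in the HAL buffer can be cast straight back to the C++ object. Roughly:

// Sketch: how Camera3Stream::cast recovers the C++ stream object from the
// camera3_stream pointer exchanged with the HAL (Camera3Stream inherits
// from camera3_stream, so this is just a downcast)
Camera3Stream* Camera3Stream::cast(camera3_stream *stream) {
    return static_cast<Camera3Stream*>(stream);
}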

Camera3OutputStream class diagram: (figure not reproduced here)
Next is returnBuffer; the call resolves to the base-class method Camera3Stream::returnBuffer:

//frameworks\av\services\camera\libcameraservice\device3\Camera3Stream.cpp
status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
        nsecs_t timestamp) {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);

    // Check if this buffer is outstanding.
    if (!isOutstandingBuffer(buffer)) {
        ALOGE("%s: Stream %d: Returning an unknown buffer.", __FUNCTION__, mId);
        return BAD_VALUE;
    }

    removeOutstandingBuffer(buffer);

    /**
     * TODO: Check that the state is valid first.
     *
     * <HAL3.2 IN_CONFIG and IN_RECONFIG in addition to CONFIGURED.
     * >= HAL3.2 CONFIGURED only
     *
     * Do this for getBuffer as well.
     */
     // dispatches into the concrete Camera3Stream subclass implementation
    status_t res = returnBufferLocked(buffer, timestamp);
    if (res == OK) {
        fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true);
    }

    // Even if returning the buffer failed, we still want to signal whoever is waiting for the
    // buffer to be returned.
    mOutputBufferReturnedSignal.signal();

    return res;
}

Stepping into Camera3OutputStream::returnBufferLocked:

//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
status_t Camera3OutputStream::returnBufferLocked(
        const camera3_stream_buffer &buffer,
        nsecs_t timestamp) {
    ATRACE_CALL();

    status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true);

    if (res != OK) {
        return res;
    }

    mLastTimestamp = timestamp;
    mFrameCount++;

    return OK;
}

This calls the base-class method Camera3IOStreamBase::returnAnyBufferLocked:

//frameworks\av\services\camera\libcameraservice\device3\Camera3IOStreamBase.cpp
status_t Camera3IOStreamBase::returnAnyBufferLocked(
        const camera3_stream_buffer &buffer,
        nsecs_t timestamp,
        bool output) {
    status_t res;

    // returnBuffer may be called from a raw pointer, not a sp<>, and we'll be
    // decrementing the internal refcount next. In case this is the last ref, we
    // might get destructed on the decStrong(), so keep an sp around until the
    // end of the call - otherwise have to sprinkle the decStrong on all exit
    // points.
    sp<Camera3IOStreamBase> keepAlive(this);
    decStrong(this);

    if ((res = returnBufferPreconditionCheckLocked()) != OK) {
        return res;
    }

    sp<Fence> releaseFence;
    res = returnBufferCheckedLocked(buffer, timestamp, output,
                                    &releaseFence);
   ....
    return res;
}

Next, returnBufferCheckedLocked:

//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
status_t Camera3OutputStream::returnBufferCheckedLocked(
            const camera3_stream_buffer &buffer,
            nsecs_t timestamp,
            bool output,
            /*out*/
            sp<Fence> *releaseFenceOut) {

    (void)output;
    ALOG_ASSERT(output, "Expected output to be true");

    status_t res;

    // Fence management - always honor release fence from HAL
    sp<Fence> releaseFence = new Fence(buffer.release_fence);
    int anwReleaseFence = releaseFence->dup();

    /**
     * Release the lock briefly to avoid deadlock with
     * StreamingProcessor::startStream -> Camera3Stream::isConfiguring (this
     * thread will go into StreamingProcessor::onFrameAvailable) during
     * queueBuffer
     */
    sp<ANativeWindow> currentConsumer = mConsumer;
    mLock.unlock();
    // obtain the ANativeWindowBuffer *anwBuffer corresponding to buffer.buffer
    ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
    /**
     * Return buffer back to ANativeWindow
     */
    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR || mDropBuffers) {
        // Cancel buffer
        if (mDropBuffers) {
            ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
        } else {
            ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
        }

        res = currentConsumer->cancelBuffer(currentConsumer.get(),
                anwBuffer,
                anwReleaseFence);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }

        notifyBufferReleased(anwBuffer);
        if (mUseBufferManager) {
            // Return this buffer back to buffer manager.
            mBufferReleasedListener->onBufferReleased();
        }
    } else {
        if (mTraceFirstBuffer && (stream_type == CAMERA3_STREAM_OUTPUT)) {
            {
                char traceLog[48];
                snprintf(traceLog, sizeof(traceLog), "Stream %d: first full buffer\n", mId);
                ATRACE_NAME(traceLog);
            }
            mTraceFirstBuffer = false;
        }

        /* Certain consumers (such as AudioSource or HardwareComposer) use
         * MONOTONIC time, causing time misalignment if camera timestamp is
         * in BOOTTIME. Do the conversion if necessary. */
        res = native_window_set_buffers_timestamp(mConsumer.get(),
                mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
                  __FUNCTION__, mId, strerror(-res), res);
            return res;
        }
        // continue delivering the data
        res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence);
        if (res != OK) {
            ALOGE("%s: Stream %d: Error queueing buffer to native window: "
                  "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
        }
    }
    mLock.lock();

    // Once a valid buffer has been returned to the queue, can no longer
    // dequeue all buffers for preallocation.
    if (buffer.status != CAMERA3_BUFFER_STATUS_ERROR) {
        mStreamUnpreparable = true;
    }

    if (res != OK) {
        close(anwReleaseFence);
    }

    *releaseFenceOut = releaseFence;

    return res;
}
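
A note on the container_of call above: buffer.buffer is a buffer_handle_t* that points at the handle member embedded inside an ANativeWindowBuffer, so stepping back by that member's offset recovers the enclosing object. The macro is essentially:

// Essence of the container_of macro used in returnBufferCheckedLocked:
// given a pointer to a member, subtract the member's offset to obtain a
// pointer to the enclosing struct
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

// so, with buffer.buffer pointing at ANativeWindowBuffer::handle:
// ANativeWindowBuffer *anwBuffer =
//         container_of(buffer.buffer, ANativeWindowBuffer, handle);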

Next, queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence):

//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
            ANativeWindowBuffer* buffer, int anwReleaseFence) {
    // consumer is a Surface object; its base class is ANativeWindow
    return consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
}

The consumer is the Surface object that the camera app handed to CameraService when the camera streams were configured. The app-side code looks roughly like this:

List<Surface> list = new LinkedList<Surface>();
List<Surface> surfaces = mFrameProcessor.getInputSurfaces();
for(Surface surs : surfaces) {
    mPreviewRequestBuilder[id].addTarget(surs);
    list.add(surs);
}
list.add(mImageReader[id].getSurface());
CameraCaptureSession.StateCallback captureSessionCallback =
                    new CameraCaptureSession.StateCallback(){...}
mCameraDevice[id].createCaptureSession(list, captureSessionCallback, null);

That Surface is obtained from a SurfaceView, roughly:

SurfaceView sv = new SurfaceView(this);
Surface surface = sv.getHolder().getSurface();

For the flow by which SurfaceView creates its Surface, see 'SurfaceView創建Surface的過程及顯示過程'.

Now that the camera has produced a frame of data, what remains is SurfaceFlinger's flush-to-screen flow (also called the SurfaceFlinger composition-and-display flow). Before diving into the code, the overall process can be seen in a flow chart (figure not reproduced here); the figure comes from 'Android4.2.2 SurfaceFlinger本地的FramebufferSurface實現真正的顯示'.

The entry point of the whole composition-and-display flow is ANativeWindow::queueBuffer(...), which corresponds to Surface::hook_queueBuffer(...)[1]; the code is as follows:

//frameworks\native\libs\gui\Surface.cpp
int Surface::hook_queueBuffer(ANativeWindow* window,
        ANativeWindowBuffer* buffer, int fenceFd) {
    Surface* c = getSelf(window);
    return c->queueBuffer(buffer, fenceFd);
}
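
The hooks are installed in Surface's constructor (see footnote 1): ANativeWindow is a struct of C function pointers, and Surface points them at its static hook_* wrappers. Abridged:

//frameworks\native\libs\gui\Surface.cpp (abridged)
Surface::Surface(const sp<IGraphicBufferProducer>& bufferProducer, bool controlledByApp)
      : mGraphicBufferProducer(bufferProducer) {
    // C callers holding an ANativeWindow* (such as Camera3OutputStream
    // above) are routed into the C++ Surface methods via these hooks
    ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
    ANativeWindow::cancelBuffer  = hook_cancelBuffer;
    ANativeWindow::queueBuffer   = hook_queueBuffer;
    ANativeWindow::query         = hook_query;
    ANativeWindow::perform       = hook_perform;
    ....
}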

Continuing with Surface::queueBuffer(...):

int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::queueBuffer");
    Mutex::Autolock lock(mMutex);
    int64_t timestamp;
    bool isAutoTimestamp = false;

    if (mTimestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
        timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
        isAutoTimestamp = true;
        ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
            timestamp / 1000000.0);
    } else {
        timestamp = mTimestamp;
    }
    // find which of the mSlots entries holds this buffer (see the sketch after this function)
    int i = getSlotFromBufferLocked(buffer);
    ...

    // Make sure the crop rectangle is entirely inside the buffer.
    Rect crop(Rect::EMPTY_RECT);
    mCrop.intersect(Rect(buffer->width, buffer->height), &crop);

    sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
    IGraphicBufferProducer::QueueBufferOutput output;
    IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
            mDataSpace, crop, mScalingMode, mTransform ^ mStickyTransform,
            fence, mStickyTransform, mEnableFrameTimestamps);

    ....
    
    nsecs_t now = systemTime();
    // mGraphicBufferProducer is a BpGraphicBufferProducer object
    // defined in frameworks\native\libs\gui\IGraphicBufferProducer.cpp;
    // it was created during BufferQueue::createBufferQueue
    status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
    mLastQueueDuration = systemTime() - now;
    .....
    mLastFrameNumber = mNextFrameNumber;

    mDefaultWidth = output.width;
    mDefaultHeight = output.height;
    mNextFrameNumber = output.nextFrameNumber;

    ....
    mQueueBufferCondition.broadcast();

    return err;
}
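
For reference, getSlotFromBufferLocked simply matches the buffer's native handle against the client-side slot cache; roughly (a sketch, not a verbatim copy):

//frameworks\native\libs\gui\Surface.cpp (sketch)
int Surface::getSlotFromBufferLocked(android_native_buffer_t* buffer) const {
    // mSlots mirrors the BufferQueue slots on the client side; match by
    // native handle to find which slot this buffer was dequeued from
    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
        if (mSlots[i].buffer != NULL &&
                mSlots[i].buffer->handle == buffer->handle) {
            return i;
        }
    }
    ALOGE("getSlotFromBufferLocked: unknown buffer: %p", buffer->handle);
    return BAD_VALUE;
}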

From the analysis in 'SurfaceView創建Surface的過程及顯示過程', we know that the Surface's mGraphicBufferProducer is a BpGraphicBufferProducer whose remote end is ultimately created in the SurfaceFlinger process when the Layer is created.
The code is as follows:

//frameworks\native\services\surfaceflinger\Layer.cpp
void Layer::onFirstRef() {
    // Creates a custom BufferQueue for SurfaceFlingerConsumer to use
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    // create the IGraphicBufferProducer and IGraphicBufferConsumer objects producer and consumer
    BufferQueue::createBufferQueue(&producer, &consumer, true);
    // wrap producer in a MonitoredProducer
    mProducer = new MonitoredProducer(producer, mFlinger, this);
    // wrap consumer in a SurfaceFlingerConsumer
    mSurfaceFlingerConsumer = new SurfaceFlingerConsumer(consumer, mTextureName, this);
    mSurfaceFlingerConsumer->setConsumerUsageBits(getEffectiveUsage(0));
    // register the onFrameAvailable callback on mSurfaceFlingerConsumer
    mSurfaceFlingerConsumer->setContentsChangedListener(this);
    mSurfaceFlingerConsumer->setName(mName);

    if (mFlinger->isLayerTripleBufferingDisabled()) {
       // limit the maximum number of dequeued buffers to 2
        mProducer->setMaxDequeuedBufferCount(2);
    }

    const sp<const DisplayDevice> hw(mFlinger->getDefaultDisplayDevice());
    updateTransformHint(hw);
}

Next, the BpGraphicBufferProducer::queueBuffer method:

     //frameworks\native\libs\gui\IGraphicBufferProducer.cpp
    virtual status_t queueBuffer(int buf,
            const QueueBufferInput& input, QueueBufferOutput* output) {
        Parcel data, reply;

        data.writeInterfaceToken(IGraphicBufferProducer::getInterfaceDescriptor());
        data.writeInt32(buf);
        data.write(input);
       // send the QUEUE_BUFFER transaction to BnGraphicBufferProducer in the SurfaceFlinger process
        status_t result = remote()->transact(QUEUE_BUFFER, data, &reply);
        if (result != NO_ERROR) {
            return result;
        }

        result = reply.read(*output);
        if (result != NO_ERROR) {
            return result;
        }

        result = reply.readInt32();
        return result;
    }

This lands in BnGraphicBufferProducer inside the SurfaceFlinger process.

Before going further, note that SurfaceFlinger handles incoming Binder messages on its binder threads, not on its main thread; the main thread is reserved for composition and rendering.
When the SurfaceFlinger service starts, it registers a binder thread pool to accept IPC requests from SurfaceFlinger clients.
The code is as follows:

//frameworks\native\services\surfaceflinger\main_surfaceflinger.cpp
int main(int, char**) {
    startHidlServices();

    signal(SIGPIPE, SIG_IGN);
    // When SF is launched in its own process, limit the number of
    // binder threads to 4.
    // limit the number of binder threads to 4
    ProcessState::self()->setThreadPoolMaxThreadCount(4);

    // start the thread pool
    sp<ProcessState> ps(ProcessState::self());
    // spawn a thread to handle Binder traffic
    ps->startThreadPool();
    // create the SurfaceFlinger object; this triggers SurfaceFlinger::onFirstRef()
    // instantiate surfaceflinger
    sp<SurfaceFlinger> flinger = DisplayUtils::getInstance()->getSFInstance();

    ...
    // initialize before clients can connect
    flinger->init();

    // publish surface flinger
    // register SurfaceFlinger with the ServiceManager
    sp<IServiceManager> sm(defaultServiceManager());
    sm->addService(String16(SurfaceFlinger::getServiceName()), flinger, false);
    ...
    // run() on the SurfaceFlinger main thread loops waiting for vsync and other messages
    // run surface flinger in this thread
    flinger->run();

    return 0;
}

From the code above, BnGraphicBufferProducer::onTransact(...) runs on a SurfaceFlinger binder thread; its code is:

status_t BnGraphicBufferProducer::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        .....
        case QUEUE_BUFFER: {
            CHECK_INTERFACE(IGraphicBufferProducer, data, reply);

            int buf = data.readInt32();
            QueueBufferInput input(data);
            QueueBufferOutput output;
            status_t result = queueBuffer(buf, input, &output);
            reply->write(output);
            reply->writeInt32(result);

            return NO_ERROR;
        }
        case CANCEL_BUFFER: {
      .....
    }
    return BBinder::onTransact(code, data, reply, flags);
}

This lands in BufferQueueProducer::queueBuffer(...):

//frameworks\native\libs\gui\BufferQueueProducer.cpp
status_t BufferQueueProducer::queueBuffer(int slot,
        const QueueBufferInput &input, QueueBufferOutput *output) {
    .....

    sp<IConsumerListener> frameAvailableListener;
    sp<IConsumerListener> frameReplacedListener;
    int callbackTicket = 0;
    uint64_t currentFrameNumber = 0;
    // create an empty item
    BufferItem item;
    { // Autolock scope
        Mutex::Autolock lock(mCore->mMutex);
        // sanity checks before queueing the buffer
        if (mCore->mIsAbandoned) {
            BQ_LOGE("queueBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
            BQ_LOGE("queueBuffer: BufferQueue has no connected producer");
            return NO_INIT;
        }

        if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
            BQ_LOGE("queueBuffer: slot index %d out of range [0, %d)",
                    slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
            return BAD_VALUE;
        } else if (!mSlots[slot].mBufferState.isDequeued()) {
            BQ_LOGE("queueBuffer: slot %d is not owned by the producer "
                    "(state = %s)", slot, mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        } else if (!mSlots[slot].mRequestBufferCalled) {
            BQ_LOGE("queueBuffer: slot %d was queued without requesting "
                    "a buffer", slot);
            return BAD_VALUE;
        }

        // If shared buffer mode has just been enabled, cache the slot of the
        // first buffer that is queued and mark it as the shared buffer.
        if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
                BufferQueueCore::INVALID_BUFFER_SLOT) {
            mCore->mSharedBufferSlot = slot;
            mSlots[slot].mBufferState.mShared = true;
        }
        // once all checks pass, this log is printed
        BQ_LOGV("queueBuffer: slot=%d/%" PRIu64 " time=%" PRIu64 " dataSpace=%d"
                " crop=[%d,%d,%d,%d] transform=%#x scale=%s",
                slot, mCore->mFrameCounter + 1, requestedPresentTimestamp,
                dataSpace, crop.left, crop.top, crop.right, crop.bottom,
                transform,
                BufferItem::scalingModeName(static_cast<uint32_t>(scalingMode)));

        const sp<GraphicBuffer>& graphicBuffer(mSlots[slot].mGraphicBuffer);
        Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
        Rect croppedRect(Rect::EMPTY_RECT);
        crop.intersect(bufferRect, &croppedRect);
        if (croppedRect != crop) {
            BQ_LOGE("queueBuffer: crop rect is not contained within the "
                    "buffer in slot %d", slot);
            return BAD_VALUE;
        }

        // Override UNKNOWN dataspace with consumer default
        if (dataSpace == HAL_DATASPACE_UNKNOWN) {
            dataSpace = mCore->mDefaultBufferDataSpace;
        }

        mSlots[slot].mFence = acquireFence;
        // mark mSlots[slot]'s state as QUEUED
        mSlots[slot].mBufferState.queue();

        // Increment the frame counter and store a local version of it
        // for use outside the lock on mCore->mMutex.
        ++mCore->mFrameCounter;
        currentFrameNumber = mCore->mFrameCounter;
        mSlots[slot].mFrameNumber = currentFrameNumber;
        // copy mSlots[slot]'s details into item
        item.mAcquireCalled = mSlots[slot].mAcquireCalled;
        item.mGraphicBuffer = mSlots[slot].mGraphicBuffer;
        item.mCrop = crop;
        item.mTransform = transform &
                ~static_cast<uint32_t>(NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY);
        item.mTransformToDisplayInverse =
                (transform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) != 0;
        item.mScalingMode = static_cast<uint32_t>(scalingMode);
        item.mTimestamp = requestedPresentTimestamp;
        item.mIsAutoTimestamp = isAutoTimestamp;
        item.mDataSpace = dataSpace;
        item.mFrameNumber = currentFrameNumber;
        item.mSlot = slot;
        item.mFence = acquireFence;
        item.mFenceTime = acquireFenceTime;
        item.mIsDroppable = mCore->mAsyncMode ||
                mCore->mDequeueBufferCannotBlock ||
                (mCore->mSharedBufferMode && mCore->mSharedBufferSlot == slot);
        item.mSurfaceDamage = surfaceDamage;
        item.mQueuedBuffer = true;
        item.mAutoRefresh = mCore->mSharedBufferMode && mCore->mAutoRefresh;

        mStickyTransform = stickyTransform;

        // Cache the shared buffer data so that the BufferItem can be recreated.
        if (mCore->mSharedBufferMode) {
            mCore->mSharedBufferCache.crop = crop;
            mCore->mSharedBufferCache.transform = transform;
            mCore->mSharedBufferCache.scalingMode = static_cast<uint32_t>(
                    scalingMode);
            mCore->mSharedBufferCache.dataspace = dataSpace;
        }

        output->bufferReplaced = false;
        // mCore->mQueue is a FIFO queue
        if (mCore->mQueue.empty()) {
            // When the queue is empty, we can ignore mDequeueBufferCannotBlock
            // and simply queue this buffer
            // push item into mQueue
            mCore->mQueue.push_back(item);
            // set frameAvailableListener to mCore->mConsumerListener
            frameAvailableListener = mCore->mConsumerListener;
        } else {
            // When the queue is not empty, we need to look at the last buffer
            // in the queue to see if we need to replace it
            const BufferItem& last = mCore->mQueue.itemAt(
                    mCore->mQueue.size() - 1);
            // last.mIsDroppable is false here
            if (last.mIsDroppable) {
            ....
            } else {
                // push item into mQueue
                mCore->mQueue.push_back(item);
                // set frameAvailableListener to mCore->mConsumerListener
                frameAvailableListener = mCore->mConsumerListener;
            }
        }

        mCore->mBufferHasBeenQueued = true;
        mCore->mDequeueCondition.broadcast();
        mCore->mLastQueuedSlot = slot;

        output->width = mCore->mDefaultWidth;
        output->height = mCore->mDefaultHeight;
        output->transformHint = mCore->mTransformHint;
        output->numPendingBuffers = static_cast<uint32_t>(mCore->mQueue.size());
        output->nextFrameNumber = mCore->mFrameCounter + 1;

        ATRACE_INT(mCore->mConsumerName.string(),
                static_cast<int32_t>(mCore->mQueue.size()));
        mCore->mOccupancyTracker.registerOccupancyChange(mCore->mQueue.size());

        // Take a ticket for the callback functions
        callbackTicket = mNextCallbackTicket++;

        VALIDATE_CONSISTENCY();
    } // Autolock scope

    // It is okay not to clear the GraphicBuffer when the consumer is SurfaceFlinger because
    // it is guaranteed that the BufferQueue is inside SurfaceFlinger's process and
    // there will be no Binder call
    if (!mConsumerIsSurfaceFlinger) {
        item.mGraphicBuffer.clear();
    }

    // Don't send the slot number through the callback since the consumer shouldn't need it
    item.mSlot = BufferItem::INVALID_BUFFER_SLOT;

    // Call back without the main BufferQueue lock held, but with the callback
    // lock held so we can ensure that callbacks occur in order

    int connectedApi;
    sp<Fence> lastQueuedFence;

    { // scope for the lock
        Mutex::Autolock lock(mCallbackMutex);
        while (callbackTicket != mCurrentCallbackTicket) {
            mCallbackCondition.wait(mCallbackMutex);
        }
        // invoke the onFrameAvailable callback
        if (frameAvailableListener != NULL) {
            frameAvailableListener->onFrameAvailable(item);
        } else if (frameReplacedListener != NULL) {
            frameReplacedListener->onFrameReplaced(item);
        }

        connectedApi = mCore->mConnectedApi;
        lastQueuedFence = std::move(mLastQueueBufferFence);

        mLastQueueBufferFence = std::move(acquireFence);
        mLastQueuedCrop = item.mCrop;
        mLastQueuedTransform = item.mTransform;

        ++mCurrentCallbackTicket;
        mCallbackCondition.broadcast();
    }

    ....

    return NO_ERROR;
}
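
Seen from the producer side, queueBuffer is one step of the classic BufferQueue cycle. A minimal sketch of that cycle (generic ANativeWindow usage, not CameraService code; 'surface' is assumed to be a valid sp<Surface>):

// Minimal sketch of the producer cycle that Surface drives against the
// BufferQueue:
ANativeWindow* win = surface.get();       // Surface is-a ANativeWindow
ANativeWindowBuffer* buf = nullptr;
int fenceFd = -1;
win->dequeueBuffer(win, &buf, &fenceFd);  // take a free slot (DEQUEUED)
// ... the producer (here: the camera HAL) fills buf ...
win->queueBuffer(win, buf, fenceFd);      // hand it to the consumer (QUEUED)
// consumer side: acquireBuffer() -> compose/display -> releaseBuffer() (FREE)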

To make the rest of the analysis easier, let's first walk through BufferQueue::createBufferQueue and how mCore->mConsumerListener gets assigned.
SurfaceFlinger calls BufferQueue::createBufferQueue(...) to create its IGraphicBufferProducer and IGraphicBufferConsumer objects, and the IGraphicBufferConsumer is then used to create the SurfaceFlingerConsumer.
This all happens in Layer::onFirstRef(), listed earlier; note that mTextureName there is a texture name created via OpenGL ES when the Layer is created.


SurfaceFlingerConsumer class diagram: (figure not reproduced here)
The SurfaceFlingerConsumer constructor:

//frameworks\native\services\surfaceflinger\SurfaceFlingerConsumer.h
 SurfaceFlingerConsumer(const sp<IGraphicBufferConsumer>& consumer,
         uint32_t tex, Layer* layer)
         // delegate to the GLConsumer base-class constructor
     : GLConsumer(consumer, tex, GLConsumer::TEXTURE_EXTERNAL, false, false),
       mTransformToDisplayInverse(false), mSurfaceDamage(), mLayer(layer)
 {}

The GLConsumer constructor:

//frameworks\native\libs\gui\GLConsumer.cpp
// useFenceSync is false
// isControlledByApp is false
GLConsumer::GLConsumer(const sp<IGraphicBufferConsumer>& bq, uint32_t tex,
        uint32_t texTarget, bool useFenceSync, bool isControlledByApp) :
    // invoke the ConsumerBase base-class constructor; isControlledByApp is false
    ConsumerBase(bq, isControlledByApp),
    mCurrentCrop(Rect::EMPTY_RECT),
    mCurrentTransform(0),
    mCurrentScalingMode(NATIVE_WINDOW_SCALING_MODE_FREEZE),
    mCurrentFence(Fence::NO_FENCE),
    mCurrentTimestamp(0),
    mCurrentDataSpace(HAL_DATASPACE_UNKNOWN),
    mCurrentFrameNumber(0),
    mDefaultWidth(1),
    mDefaultHeight(1),
    mFilteringEnabled(true),
    mTexName(tex),
    mUseFenceSync(useFenceSync),
    mTexTarget(texTarget),
    mEglDisplay(EGL_NO_DISPLAY),
    mEglContext(EGL_NO_CONTEXT),
    mCurrentTexture(BufferQueue::INVALID_BUFFER_SLOT),
    mAttached(true)
{
    GLC_LOGV("GLConsumer");

    memcpy(mCurrentTransformMatrix, mtxIdentity,
            sizeof(mCurrentTransformMatrix));

    mConsumer->setConsumerUsageBits(DEFAULT_USAGE_FLAGS);
}

The ConsumerBase constructor:

//frameworks\native\libs\gui\ConsumerBase.cpp
ConsumerBase::ConsumerBase(const sp<IGraphicBufferConsumer>& bufferQueue, bool controlledByApp) :
       mAbandoned(false),
       // store the IGraphicBufferConsumer bufferQueue into mConsumer
       mConsumer(bufferQueue),
       mPrevFinalReleaseFence(Fence::NO_FENCE) {
   // Choose a name using the PID and a process-unique ID.
   mName = String8::format("unnamed-%d-%d", getpid(), createProcessUniqueId());

   // Note that we can't create an sp<...>(this) in a ctor that will not keep a
   // reference once the ctor ends, as that would cause the refcount of 'this'
   // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
   // that's what we create.

   wp<ConsumerListener> listener = static_cast<ConsumerListener*>(this);
   // create a BufferQueue::ProxyConsumerListener named proxy;
   // listener is a weak pointer to this ConsumerBase
   sp<IConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
   // call consumerConnect() on the IGraphicBufferConsumer
   status_t err = mConsumer->consumerConnect(proxy, controlledByApp);
   if (err != NO_ERROR) {
       CB_LOGE("ConsumerBase: error connecting to BufferQueue: %s (%d)",
               strerror(-err), err);
   } else {
       mConsumer->setConsumerName(mName);
    }
}

This goes into BufferQueueConsumer's consumerConnect method:

//frameworks\native\libs\gui\include\gui\BufferQueueConsumer.h
    virtual status_t consumerConnect(const sp<IConsumerListener>& consumer,
            bool controlledByApp) {
        return connect(consumer, controlledByApp);
    }

And on into BufferQueueConsumer::connect:

//frameworks\native\libs\gui\BufferQueueConsumer.cpp
// controlledByApp is false
status_t BufferQueueConsumer::connect(
        const sp<IConsumerListener>& consumerListener, bool controlledByApp) {
    ...
    // assign mCore->mConsumerListener; consumerListener is the BufferQueue::ProxyConsumerListener
    mCore->mConsumerListener = consumerListener;
    mCore->mConsumerControlledByApp = controlledByApp;

    return NO_ERROR;
}

That completes the assignment of mCore->mConsumerListener.

It follows that the frameAvailableListener->onFrameAvailable(item) call inside BufferQueueProducer::queueBuffer(...) invokes BufferQueue::ProxyConsumerListener's onFrameAvailable method:

//frameworks\native\libs\gui\BufferQueue.cpp
void BufferQueue::ProxyConsumerListener::onFrameAvailable(
        const BufferItem& item) {
    // mConsumerListener is (a weak reference to) the ConsumerBase object
    sp<ConsumerListener> listener(mConsumerListener.promote());
    if (listener != NULL) {
        listener->onFrameAvailable(item);
    }
}

Into ConsumerBase:

void ConsumerBase::onFrameAvailable(const BufferItem& item) {
    CB_LOGV("onFrameAvailable");

    sp<FrameAvailableListener> listener;
    { // scope for the lock
        Mutex::Autolock lock(mFrameAvailableMutex);
        // the member set by mSurfaceFlingerConsumer->setContentsChangedListener(this) in Layer::onFirstRef()
        listener = mFrameAvailableListener.promote();
    }
    // invoke the onFrameAvailable that SurfaceFlinger registered,
    // i.e. Layer::onFrameAvailable(const BufferItem& item)
    if (listener != NULL) {
        CB_LOGV("actually calling onFrameAvailable");
        listener->onFrameAvailable(item);
    }
}

From the analysis above, listener->onFrameAvailable(item) invokes Layer::onFrameAvailable(const BufferItem& item); the code is:

void Layer::onFrameAvailable(const BufferItem& item) {
    // Add this buffer from our internal queue tracker
    { // Autolock scope
        Mutex::Autolock lock(mQueueItemLock);
        mFlinger->mInterceptor.saveBufferUpdate(this, item.mGraphicBuffer->getWidth(),
                item.mGraphicBuffer->getHeight(), item.mFrameNumber);
        // Reset the frame number tracker when we receive the first buffer after
        // a frame number reset
        if (item.mFrameNumber == 1) {
            mLastFrameNumberReceived = 0;
        }

        // Ensure that callbacks are handled in order
        while (item.mFrameNumber != mLastFrameNumberReceived + 1) {
            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock,
                    ms2ns(500));
            if (result != NO_ERROR) {
                ALOGE("[%s] Timed out waiting on callback", mName.string());
            }
        }
        // enqueue this frame's buffer item
        mQueueItems.push_back(item);
        android_atomic_inc(&mQueuedFrames);

        // Wake up any pending callbacks
        mLastFrameNumberReceived = item.mFrameNumber;
        mQueueItemCondition.broadcast();
    }
    // notify SurfaceFlinger that this Layer has queued a frame (camera preview data)
    // and request SurfaceFlinger to perform the final flush to the screen
    mFlinger->signalLayerUpdate();
}
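
Putting the pieces together, the consumer-side callback chain for one queued frame is:

// Callback chain for one queued frame (summary of the code above):
// BufferQueueProducer::queueBuffer()
//   -> mCore->mConsumerListener->onFrameAvailable()    // BufferQueue::ProxyConsumerListener
//     -> ConsumerBase::onFrameAvailable()              // via weak-ref promote()
//       -> mFrameAvailableListener->onFrameAvailable() // set by setContentsChangedListener()
//         -> Layer::onFrameAvailable()                 // enqueue item, signalLayerUpdate()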

This completes the production of the frame data; what follows is its consumption, which waits for the next vsync.

To summarize: a frame produced by CameraService is queued, via Surface's queueBuffer, into mQueueItems, the frame queue of the corresponding Layer in the SurfaceFlinger process. SurfaceFlinger then waits for the next vsync before flushing the frame to the display.

Next, mFlinger->signalLayerUpdate():

//frameworks\native\services\surfaceflinger\SurfaceFlinger.cpp
void SurfaceFlinger::signalLayerUpdate() {
    mEventQueue.invalidate();
}

And then:

//frameworks\native\services\surfaceflinger\MessageQueue.cpp
void MessageQueue::invalidate() {
    mEvents->requestNextVsync();
}
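
mEvents here is a Connection to SurfaceFlinger's EventThread; it is wired up in MessageQueue::setEventThread, roughly as follows (member names taken from AOSP O, approximate):

//frameworks\native\services\surfaceflinger\MessageQueue.cpp (abridged)
void MessageQueue::setEventThread(const sp<EventThread>& eventThread) {
    mEventThread = eventThread;
    // mEvents is the Connection that requestNextVsync() is called on above
    mEvents = eventThread->createEventConnection();
    mEvents->stealReceiveChannel(&mEventTube);
    // vsync events arrive over this BitTube and wake the main-thread Looper
    mLooper->addFd(mEventTube.getFd(), 0, Looper::EVENT_INPUT,
            MessageQueue::cb_eventReceiver, this);
}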

mEvents->requestNextVsync() waits for the next vsync. This article doesn't analyze vsync itself; see the two articles below for the details.
For how vsync is generated and called back into SurfaceFlinger before Android O, see
'Android垂直同步信號VSync的產生及傳播結構詳解'.
For how vsync is generated and delivered to SurfaceFlinger on Android O, see
'AndroidO Vsync的產生及分發給surfaceFlinger流程學習'.

Now continue into mEvents->requestNextVsync():

//frameworks\native\services\surfaceflinger\EventThread.cpp
void EventThread::requestNextVsync(
        const sp<EventThread::Connection>& connection) {
    Mutex::Autolock _l(mLock);

    mFlinger.resyncWithRateLimit();
    // count == -1: this connection is not waiting for vsync (not interested)
    // count == 0:  this connection waits for the next vsync (interested exactly once)
    // count >= 1:  this connection receives vsync continuously (every count-th event)
    if (connection->count < 0) {
        connection->count = 0;
        // connection->count changed; wake up the EventThread
        mCondition.broadcast();
    }
}

When vsync arrives (the message is received on another thread):

//frameworks\native\services\surfaceflinger\EventThread.cpp
void EventThread::onVSyncEvent(nsecs_t timestamp) {
    Mutex::Autolock _l(mLock);
    // update the mVSyncEvent state
    mVSyncEvent[0].header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
    mVSyncEvent[0].header.id = 0;
    mVSyncEvent[0].header.timestamp = timestamp;
    mVSyncEvent[0].vsync.count++;
    // wake up the EventThread
    mCondition.broadcast();
}

The EventThread waits for vsync events in waitForEvent(&event); the code is:

//frameworks\native\services\surfaceflinger\EventThread.cpp
bool EventThread::threadLoop() {
    DisplayEventReceiver::Event event;
    Vector< sp<EventThread::Connection> > signalConnections;
     // wait for a vsync event;
     // when one arrives, signalConnections and event are filled in
    signalConnections = waitForEvent(&event);

    // dispatch events to listeners...
    const size_t count = signalConnections.size();
    for (size_t i=0 ; i<count ; i++) {
        const sp<Connection>& conn(signalConnections[i]);
        // now see if we still need to report this event
        // once vsync is received, dispatch the event via postEvent (see the sketch after this function)
        status_t err = conn->postEvent(event);
        ...
    }
    return true;
}
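
conn->postEvent(event) just writes the event into that connection's BitTube; on the other end, MessageQueue's Looper callback reads it and eventually invokes SurfaceFlinger::onMessageReceived. Abridged:

//frameworks\native\services\surfaceflinger\EventThread.cpp (abridged)
status_t EventThread::Connection::postEvent(
        const DisplayEventReceiver::Event& event) {
    // push the event into this connection's BitTube; the MessageQueue on
    // the receiving side picks it up and dispatches INVALIDATE/REFRESH
    ssize_t size = DisplayEventReceiver::sendEvents(&mChannel, &event, 1);
    return size < 0 ? status_t(size) : status_t(NO_ERROR);
}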

Next, waitForEvent(&event), the function in which the EventThread waits for vsync events:

//\frameworks\native\services\surfaceflinger\EventThread.cpp
Vector< sp<EventThread::Connection> > EventThread::waitForEvent(
        DisplayEventReceiver::Event* event)
{
    Mutex::Autolock _l(mLock);
    // the Connections that must be notified when vsync arrives
    Vector< sp<EventThread::Connection> > signalConnections;

    do {
        bool eventPending = false;
        bool waitForVSync = false;

        size_t vsyncCount = 0;
        nsecs_t timestamp = 0;
        for (int32_t i=0 ; i<(int32_t)mVSyncEvent.size(); i++) {
           // timestamp of the vsync
            timestamp = mVSyncEvent[i].header.timestamp;
            if (timestamp) {
                // we have a vsync event to dispatch
                if (mInterceptVSyncs) {
                    mFlinger.mInterceptor.saveVSyncEvent(timestamp);
                }
                *event = mVSyncEvent[i];
                mVSyncEvent[i].header.timestamp = 0;
                // update the vsync count
                vsyncCount = mVSyncEvent[i].vsync.count;
                break;
            }
        }
        // in our scenario timestamp is non-zero, so this branch is skipped
        if (!timestamp) {
            // no vsync event, see if there are some other event
            // we're not interested in the other events here
            ....
        }

        // find out connections waiting for events
        size_t count = mDisplayEventConnections.size();
        // walk all mDisplayEventConnections looking for ones interested in vsync
        for (size_t i=0 ; i<count ; i++) {
            sp<Connection> connection(mDisplayEventConnections[i].promote());
            if (connection != NULL) {
                bool added = false;
                // this connection is interested in (waiting for) vsync;
                // requestNextVsync set connection->count to 0
                if (connection->count >= 0) {
                    // we need vsync events because at least
                    // one connection is waiting for it
                    waitForVSync = true;
                    if (timestamp) {
                        // we consume the event only if it's time
                        // (ie: we received a vsync event)
                        // requestNextVsync set connection->count to 0
                        if (connection->count == 0) {
                            // fired this time around
                            // reset the flag: this one-shot connection is no longer interested in vsync
                            connection->count = -1;
                            // add this connection to signalConnections
                            signalConnections.add(connection);
                            added = true;
                        } else if (connection->count == 1 ||
                                (vsyncCount % connection->count) == 0) {
                            // continuous event, and time to report it
                            signalConnections.add(connection);
                            added = true;
                        }
                    }
                }

                ....
            } else {
                // we couldn't promote this reference, the connection has
                // died, so clean-up!
                mDisplayEventConnections.removeAt(i);
                --i; --count;
            }
        }

        // Here we figure out if we need to enable or disable vsyncs
        if (timestamp && !waitForVSync) {
            // we received a VSYNC but we have no clients
            // don't report it, and disable VSYNC events
            disableVSyncLocked();
        } else if (!timestamp && waitForVSync) {
            // we have at least one client, so we want vsync enabled
            // (TODO: this function is called right after we finish
            // notifying clients of a vsync, so this call will be made
            // at the vsync rate, e.g. 60fps.  If we can accurately
            // track the current state we could avoid making this call
            // so often.)
            enableVSyncLocked();
        }

        // note: !timestamp implies signalConnections.isEmpty(), because we
        // don't populate signalConnections if there's no vsync pending
        // if there is no timestamp and no pending event, block this thread
        if (!timestamp && !eventPending) {
            // wait for something to happen
            if (waitForVSync) {
                // This is where we spend most of our time, waiting
                // for vsync events and new client registrations.
                //
                // If the screen is off, we can't use h/w vsync, so we
                // use a 16ms timeout instead.  It doesn't need to be
                // precise, we just need to keep feeding our clients.
                //
                // We don't want to stall if there's a driver bug, so we
                // use a (long) timeout when waiting for h/w vsync, and
                // generate fake events when necessary.
                bool softwareSync = mUseSoftwareVSync;
                nsecs_t timeout = softwareSync ? ms2ns(16) : ms2ns(1000);

                if (mCondition.waitRelative(mLock, timeout) == TIMED_OUT) {
                    if (!softwareSync) {
                        ALOGW("Timed out waiting for hw vsync; faking it");
                    }
                    // FIXME: how do we decide which display id the fake
                    // vsync came from ?
                    mVSyncEvent[0].header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
                    mVSyncEvent[0].header.id = DisplayDevice::DISPLAY_PRIMARY;
                    mVSyncEvent[0].header.timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
                    mVSyncEvent[0].vsync.count++;
                }
            } else {
                // Nobody is interested in vsync, so we just want to sleep.
                // h/w vsync should be disabled, so this will wait until we
                // get a new connection, or an existing connection becomes
                // interested in receiving vsync again.
                mCondition.wait(mLock);
            }
        }
      // exit the loop once signalConnections is non-empty
    } while (signalConnections.isEmpty());

    // here we're guaranteed to have a timestamp and some connections to signal
    // (The connections might have dropped out of mDisplayEventConnections
    // while we were asleep, but we'll still have strong references to them.)
    // return signalConnections
    return signalConnections;
}

postEvent isn't covered in detail here; for the full delivery path see 'Android P 圖形顯示系統(六) SurfaceFlinger合成流程(一)'.

The message finally arrives at SurfaceFlinger::onMessageReceived(int32_t what); the code is:

//frameworks\native\services\surfaceflinger\SurfaceFlinger.cpp
void SurfaceFlinger::onMessageReceived(int32_t what) {
   ATRACE_CALL();
   switch (what) {
       case MessageQueue::INVALIDATE: {
           bool frameMissed = !mHadClientComposition &&
                   mPreviousPresentFence != Fence::NO_FENCE &&
                   (mPreviousPresentFence->getSignalTime() ==
                           Fence::SIGNAL_TIME_PENDING);
           ATRACE_INT("FrameMissed", static_cast<int>(frameMissed));
           if (mPropagateBackpressure && frameMissed) {
               signalLayerUpdate();
               break;
           }

           // Now that we're going to make it to the handleMessageTransaction()
           // call below it's safe to call updateVrFlinger(), which will
           // potentially trigger a display handoff.
           updateVrFlinger();

           bool refreshNeeded = handleMessageTransaction();
           refreshNeeded |= handleMessageInvalidate();
           refreshNeeded |= mRepaintEverything;
           if (refreshNeeded) {
               // Signal a refresh if a transaction modified the window state,
               // a new buffer was latched, or if HWC has requested a full
               // repaint
               signalRefresh();
           }
           break;
       }
       case MessageQueue::REFRESH: {
           handleMessageRefresh();
           break;
       }
   }
}

handleMessageTransaction() isn't analyzed here; for the details see these articles[2]:

Android P 圖形顯示系統(七) SurfaceFlinger合成流程(二)
Android P 圖形顯示系統(八) SurfaceFlinger合成流程(三)
Now for handleMessageInvalidate():

//frameworks\native\services\surfaceflinger\SurfaceFlinger.cpp
bool SurfaceFlinger::handleMessageInvalidate() {
   ATRACE_CALL();
   return handlePageFlip();
}

And handlePageFlip():

//frameworks\native\services\surfaceflinger\SurfaceFlinger.cpp
bool SurfaceFlinger::handlePageFlip()
{
   ALOGV("handlePageFlip");

   nsecs_t latchTime = systemTime();

   bool visibleRegions = false;
   bool frameQueued = false;
   bool newDataLatched = false;

   // Store the set of layers that need updates. This set must not change as
   // buffers are being latched, as this could result in a deadlock.
   // Example: Two producers share the same command stream and:
   // 1.) Layer 0 is latched
   // 2.) Layer 0 gets a new frame
   // 2.) Layer 1 gets a new frame
   // 3.) Layer 1 is latched.
   // Display is now waiting on Layer 1's frame, which is behind layer 0's
   // second frame. But layer 0's second frame could be waiting on display.
   // walk every layer in mDrawingState;
   // if a layer has a new frame and shouldPresentNow is true (it must be shown now),
   // push that Layer into mLayersWithQueuedFrames
   mDrawingState.traverseInZOrder([&](Layer* layer) {
       if (layer->hasQueuedFrame()) {
           frameQueued = true;
           if (layer->shouldPresentNow(mPrimaryDispSync)) {
               mLayersWithQueuedFrames.push_back(layer);
           } else {
               layer->useEmptyDamage();
           }
       } else {
           layer->useEmptyDamage();
       }
   });
   // for every Layer in mLayersWithQueuedFrames,
   // latchBuffer acquires the Layer's buffer and binds it to a texture
   for (auto& layer : mLayersWithQueuedFrames) {
       // acquire the current buffer and bind it to the texture
       const Region dirty(layer->latchBuffer(visibleRegions, latchTime));
        // (role of this call not examined in this article)
       layer->useSurfaceDamage();
         // (role of this call not examined in this article)
       invalidateLayerStack(layer, dirty);
       if (layer->isBufferLatched()) {
           // update the flag
           newDataLatched = true;
       }
   }

   mVisibleRegionsDirty |= visibleRegions;

   // If we will need to wake up at some time in the future to deal with a
   // queued frame that shouldn't be displayed during this vsync period, wake
   // up during the next vsync period to check again.
   if (frameQueued && (mLayersWithQueuedFrames.empty() || !newDataLatched)) {
       signalLayerUpdate();
   }

   // Only continue with the refresh if there is actually new work to do
   return !mLayersWithQueuedFrames.empty() && newDataLatched;
}

Now for layer->latchBuffer(visibleRegions, latchTime):

//frameworks\native\services\surfaceflinger\Layer.cpp
Region Layer::latchBuffer(bool& recomputeVisibleRegions, nsecs_t latchTime)
{
  .....

   // Capture the old state of the layer for comparisons later
   const State& s(getDrawingState());
   const bool oldOpacity = isOpaque(s);
   // stash the current buffer as the old one
   sp<GraphicBuffer> oldActiveBuffer = mActiveBuffer;
   ....
   // This boolean is used to make sure that SurfaceFlinger's shadow copy
   // of the buffer queue isn't modified when the buffer queue is returning
   // BufferItem's that weren't actually queued. This can happen in shared
   // buffer mode.
   bool queuedBuffer = false;
   LayerRejecter r(mDrawingState, getCurrentState(), recomputeVisibleRegions,
                   getProducerStickyTransform() != 0, mName.string(),
                   mOverrideScalingMode, mFreezeGeometryUpdates);
                   
    // acquire the buffer and bind it to the texture
   status_t updateResult = mSurfaceFlingerConsumer->updateTexImage(&r,
           mFlinger->mPrimaryDispSync, &mAutoRefresh, &queuedBuffer,
           mLastFrameNumberReceived);

   ......
   // update mActiveBuffer
   // update the active buffer
   mActiveBuffer = mSurfaceFlingerConsumer->getCurrentBuffer(
           &mActiveBufferSlot);
   if (mActiveBuffer == NULL) {
       // this can only happen if the very first buffer was rejected.
       return outDirtyRegion;
   }

   mBufferLatched = true;
   mPreviousFrameNumber = mCurrentFrameNumber;
   mCurrentFrameNumber = mSurfaceFlingerConsumer->getFrameNumber();

  
   mRefreshPending = true;
   mFrameLatencyNeeded = true;

  ....

   return outDirtyRegion;
}

The key call to examine is mSurfaceFlingerConsumer->updateTexImage(...):

status_t SurfaceFlingerConsumer::updateTexImage(BufferRejecter* rejecter,
       const DispSync& dispSync, bool* autoRefresh, bool* queuedBuffer,
       uint64_t maxFrameNumber)
{
   ATRACE_CALL();
   ALOGV("updateTexImage");
   Mutex::Autolock lock(mMutex);

   if (mAbandoned) {
       ALOGE("updateTexImage: GLConsumer is abandoned!");
       return NO_INIT;
   }

   // Make sure the EGL state is the same as in previous calls.
   status_t err = checkAndUpdateEglStateLocked();
   if (err != NO_ERROR) {
       return err;
   }

   BufferItem item;

   // Acquire the next buffer.
   // In asynchronous mode the list is guaranteed to be one buffer
   // deep, while in synchronous mode we use the oldest buffer.
   // acquire the buffer
   err = acquireBufferLocked(&item, computeExpectedPresent(dispSync),
           maxFrameNumber);
 ....
   // propagate item.mAutoRefresh into autoRefresh
   if (autoRefresh) {
       *autoRefresh = item.mAutoRefresh;
   }
   // propagate item.mQueuedBuffer into queuedBuffer
   if (queuedBuffer) {
       *queuedBuffer = item.mQueuedBuffer;
   }

   // We call the rejecter here, in case the caller has a reason to
   // not accept this buffer.  This is used by SurfaceFlinger to
   // reject buffers which have the wrong size
   int slot = item.mSlot;
   // validate the acquired mGraphicBuffer; reject it if unsuitable
   if (rejecter && rejecter->reject(mSlots[slot].mGraphicBuffer, item)) {
       releaseBufferLocked(slot, mSlots[slot].mGraphicBuffer, EGL_NO_SYNC_KHR);
       return BUFFER_REJECTED;
   }

   // Release the previous buffer.
   // adopt the new buffer and release the old one
#ifdef USE_HWC2
   err = updateAndReleaseLocked(item, &mPendingRelease);
#else
   err = updateAndReleaseLocked(item);
#endif
   if (err != NO_ERROR) {
       return err;
   }

   if (!SyncFeatures::getInstance().useNativeFenceSync()) {
       // Bind the new buffer to the GL texture.
       //
       // Older devices require the "implicit" synchronization provided
       // by glEGLImageTargetTexture2DOES, which this method calls.  Newer
       // devices will either call this in Layer::onDraw, or (if it's not
       // a GL-composited layer) not at all.
        // bind it to the GL texture
       err = bindTextureImageLocked();
   }

   return err;
}

Next, GLConsumer::updateAndReleaseLocked:

status_t GLConsumer::updateAndReleaseLocked(const BufferItem& item,
       PendingRelease* pendingRelease)
{
   status_t err = NO_ERROR;

   int slot = item.mSlot;

  ......

   // Ensure we have a valid EglImageKHR for the slot, creating an EglImage
   // if nessessary, for the gralloc buffer currently in the slot in
   // ConsumerBase.
   // We may have to do this even when item.mGraphicBuffer == NULL (which
   // means the buffer was previously acquired).
   
   // create mEglSlots[slot].mEglImage for the acquired BufferItem if needed
   err = mEglSlots[slot].mEglImage->createIfNeeded(mEglDisplay, item.mCrop);
  .....

   // Do whatever sync ops we need to do before releasing the old slot.
   // if the acquired slot differs from mCurrentTexture, wait on the current texture's sync point
   if (slot != mCurrentTexture) {
       err = syncForReleaseLocked(mEglDisplay);
       ......
   }

   GLC_LOGV("updateAndRelease: (slot=%d buf=%p) -> (slot=%d buf=%p)",
           mCurrentTexture, mCurrentTextureImage != NULL ?
                   mCurrentTextureImage->graphicBufferHandle() : 0,
           slot, mSlots[slot].mGraphicBuffer->handle);

   // Hang onto the pointer so that it isn't freed in the call to
   // releaseBufferLocked() if we're in shared buffer mode and both buffers are
   // the same.
   sp<EglImage> nextTextureImage = mEglSlots[slot].mEglImage;

   // release old buffer
   if (mCurrentTexture != BufferQueue::INVALID_BUFFER_SLOT) {
       if (pendingRelease == nullptr) {
          // release the old buffer
           status_t status = releaseBufferLocked(
                   mCurrentTexture, mCurrentTextureImage->graphicBuffer(),
                   mEglDisplay, mEglSlots[mCurrentTexture].mEglFence);
                   .....
       }
       ......
   }
   // update mCurrentTexture, mCurrentTextureImage and the other members
   // Update the GLConsumer state.
   mCurrentTexture = slot;
   mCurrentTextureImage = nextTextureImage;
   mCurrentCrop = item.mCrop;
   mCurrentTransform = item.mTransform;
   mCurrentScalingMode = item.mScalingMode;
   mCurrentTimestamp = item.mTimestamp;
   mCurrentDataSpace = item.mDataSpace;
   mCurrentFence = item.mFence;
   mCurrentFenceTime = item.mFenceTime;
   mCurrentFrameNumber = item.mFrameNumber;

   computeCurrentTransformMatrixLocked();

   return err;
}

Finally, bindTextureImageLocked:

status_t GLConsumer::bindTextureImageLocked() {
 
   // bind the texture
   glBindTexture(mTexTarget, mTexName);

   // call createIfNeeded again here to make sure mCurrentTextureImage is valid
   status_t err = mCurrentTextureImage->createIfNeeded(mEglDisplay,
                                                       mCurrentCrop);
   // attach mCurrentTextureImage to the mTexName texture (see the sketch after this function)
   mCurrentTextureImage->bindToTextureTarget(mTexTarget);

.....
   // insert a sync point
   // Wait for the new buffer to be ready.
   return doGLFenceWaitLocked();
}
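
What createIfNeeded/bindToTextureTarget do internally is standard EGLImage plumbing: wrap the GraphicBuffer in an EGLImageKHR and attach it to the external texture target. A simplified sketch (error handling omitted; this mirrors what GLConsumer::EglImage does, not a verbatim copy):

// Simplified sketch of the EGLImage plumbing inside EglImage:
EGLClientBuffer cbuf =
        static_cast<EGLClientBuffer>(graphicBuffer->getNativeBuffer());
EGLint attrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE };
// wrap the gralloc buffer in an EGLImage (zero-copy)
EGLImageKHR image = eglCreateImageKHR(eglDisplay, EGL_NO_CONTEXT,
        EGL_NATIVE_BUFFER_ANDROID, cbuf, attrs);
// attach it to the external texture target bound by glBindTexture above
glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES,
        static_cast<GLeglImageOES>(image));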

That completes the analysis of latchBuffer and, with it, handleMessageInvalidate; this stage acquires the buffer and binds it to a texture.

SurfaceFlinger then calls signalRefresh to post the message that drives the rest of the composition-and-display flow; its handler is handleMessageRefresh(), which we turn to now:

//frameworks\native\services\surfaceflinger\SurfaceFlinger.cpp
void SurfaceFlinger::handleMessageRefresh() {
   ATRACE_CALL();

   mRefreshPending = false;

   nsecs_t refreshStartTime = systemTime(SYSTEM_TIME_MONOTONIC);

   preComposition(refreshStartTime);
   rebuildLayerStacks();
   setUpHWComposer();
   doDebugFlashRegions();
   doComposition();
   postComposition(refreshStartTime);

   mPreviousPresentFence = mHwc->getPresentFence(HWC_DISPLAY_PRIMARY);

   mHadClientComposition = false;
   for (size_t displayId = 0; displayId < mDisplays.size(); ++displayId) {
       const sp<DisplayDevice>& displayDevice = mDisplays[displayId];
       mHadClientComposition = mHadClientComposition ||
               mHwc->hasClientComposition(displayDevice->getHwcDisplayId());
   }

   mLayersWithQueuedFrames.clear();
}

Another article on SurfaceFlinger, 'SurfaceFlinger中Buffer的創建與顯示', focuses more on how GraphicBuffers are allocated and displayed.

To be continued...


  1. The assignment is made in Surface's constructor: ANativeWindow::queueBuffer = hook_queueBuffer;

  2. These two articles were suddenly made VIP-only, which is frustrating; I'll dig into this function in detail some other time.
