How MediaRecorder Creates Its Surface

When configuring a camera stream, Android needs to hand cameraservice a Surface object. While analyzing how SnapdragonCamera configures its HFR (high-frame-rate) recording stream, I found that SnapdragonCamera obtains that surface from a MediaRecorder:

mMediaRecorder.getSurface()

This article analyzes how MediaRecorder creates that surface, and along the way maps out the code architecture of MediaRecorder.

App-side (APK) setup code

    //SnapdragonCamera app code (app side, not frameworks)
    private void setUpMediaRecorder(int cameraId) throws IOException {
        Log.d(TAG, "setUpMediaRecorder");
        ...
        if (mMediaRecorder == null) mMediaRecorder = new MediaRecorder();

        mMediaRecorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
        mMediaRecorder.setOutputFormat(mProfile.fileFormat);
        mMediaRecorder.setOutputFile(fileName);
        mMediaRecorder.setVideoFrameRate(mProfile.videoFrameRate);
        mMediaRecorder.setVideoEncodingBitRate(mProfile.videoBitRate);
        mMediaRecorder.setVideoSize(mProfile.videoFrameWidth, mProfile.videoFrameHeight);
        mMediaRecorder.setVideoEncoder(videoEncoder);
        mMediaRecorder.setMaxDuration(mMaxVideoDurationInMs);

        try {
            mMediaRecorder.setMaxFileSize(maxFileSize);
        } catch (RuntimeException exception) {
            // a failure to set the max file size is not fatal; keep recording
        }
        try {
            mMediaRecorder.prepare();
        } catch (IOException e) {
            // error handling elided in this excerpt
        }
    }

Into MediaRecorder.java; the relevant parts are the constructor MediaRecorder() and the prepare() method:

    //frameworks\base\media\java\android\media\MediaRecorder.java
    public MediaRecorder() {
        ....
        String packageName = ActivityThread.currentPackageName();
        /* Native setup requires a weak reference to our object.
         * It's easier to create it here than in C++.
         */
        native_setup(new WeakReference<MediaRecorder>(this), packageName,
                ActivityThread.currentOpPackageName());
    }
    
    public void prepare() throws IllegalStateException, IOException
    {  
        ...
        _prepare();
    }

_prepare() is a native method (as is native_setup()), declared as follows:

  //frameworks\base\media\java\android\media\MediaRecorder.java
  
    private native final void native_setup(Object mediarecorder_this,
            String clientName, String opPackageName) throws IllegalStateException;
            
    private native void _prepare() throws IllegalStateException, IOException;

Into the JNI layer implementation:

//frameworks\base\media\jni\android_media_MediaRecorder.cpp
static void android_media_MediaRecorder_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                                       jstring packageName, jstring opPackageName)
{
    ...
    //create the native MediaRecorder
    sp<MediaRecorder> mr = new MediaRecorder(String16(opPackageNameStr.c_str()));

    // create new listener and give it to MediaRecorder
    sp<JNIMediaRecorderListener> listener = new JNIMediaRecorderListener(env, thiz, weak_this);
    mr->setListener(listener);

    // pass client package name for permissions tracking
    mr->setClientName(clientName);
}

static void android_media_MediaRecorder_prepare(JNIEnv *env, jobject thiz)
{
    sp<MediaRecorder> mr = getMediaRecorder(env, thiz);

    //on the VideoSource.SURFACE path the surface field is still null here
    jobject surface = env->GetObjectField(thiz, fields.surface);
    if (surface != NULL) {
        ....
    }
    //call mr->prepare(); the MediaRecorder's surface is created inside this call
    process_media_recorder_call(env, mr->prepare(), "java/io/IOException", "prepare failed.");
}

Into mediarecorder.cpp:

//frameworks\av\media\libmedia\mediarecorder.cpp
//constructor
MediaRecorder::MediaRecorder(const String16& opPackageName) : mSurfaceMediaSource(NULL)
{
    ALOGV("constructor");
    //obtain the IMediaPlayerService proxy (actually a BpMediaPlayerService object)
    const sp<IMediaPlayerService> service(getMediaPlayerService());
    if (service != NULL) {
        //ask the service to create an IMediaRecorder (actually a BpMediaRecorder), stored in mMediaRecorder
        mMediaRecorder = service->createMediaRecorder(opPackageName);
    }
    if (mMediaRecorder != NULL) {
        mCurrentState = MEDIA_RECORDER_IDLE;
    }

    doCleanUp();
}

The prepare() method:

//frameworks\av\media\libmedia\mediarecorder.cpp
status_t MediaRecorder::prepare()
{
    ALOGV("prepare");
    ...
    status_t ret = mMediaRecorder->prepare();
   ...
    return ret;
}
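
mMediaRecorder here is the BpMediaRecorder proxy obtained in the constructor, so prepare() crosses Binder exactly like createMediaRecorder() does below. A minimal sketch of the proxy side, following the pattern in IMediaRecorder.cpp (quoted from memory; treat names such as PREPARE as assumptions):

//frameworks\av\media\libmedia\IMediaRecorder.cpp (BpMediaRecorder; sketch)
virtual status_t prepare()
{
    Parcel data, reply;
    data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
    //Binder IPC; dispatched to MediaRecorderClient::prepare() on the server side
    remote()->transact(PREPARE, data, &reply);
    return reply.readInt32();
}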

The implementation of getMediaPlayerService(), which obtains the IMediaPlayerService proxy:

//frameworks\av\media\libmedia\IMediaDeathNotifier.cpp
// establish binder interface to MediaPlayerService
/*static*/const sp<IMediaPlayerService>
IMediaDeathNotifier::getMediaPlayerService()
{
    ALOGV("getMediaPlayerService");
    Mutex::Autolock _l(sServiceLock);
    if (sMediaPlayerService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.player"));
            if (binder != 0) {
                break;
            }
            ALOGW("Media player service not published, waiting...");
            usleep(500000); // 0.5 s
        } while (true);

        if (sDeathNotifier == NULL) {
            sDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(sDeathNotifier);
        sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
    }
    ALOGE_IF(sMediaPlayerService == 0, "no media player service!?");
    return sMediaPlayerService;
}
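
The DeathNotifier registered through linkToDeath() clears the cached proxy when mediaserver dies, so the next getMediaPlayerService() call reconnects. A simplified sketch of that callback (following IMediaDeathNotifier.cpp, reproduced from memory):

//frameworks\av\media\libmedia\IMediaDeathNotifier.cpp (sketch)
void IMediaDeathNotifier::DeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
    ALOGW("media server died");
    Mutex::Autolock _l(sServiceLock);
    sMediaPlayerService.clear();   //drop the stale proxy
    //... registered IMediaDeathNotifier clients are then notified via died()
}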

Now the server side: IMediaPlayerService is hosted by the mediaserver process.

//rc file
//frameworks\av\media\mediaserver\mediaserver.rc
service media /system/bin/mediaserver
    class main
    user media
    group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
    ioprio rt 4
    writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks

Process entry point:

//frameworks\av\media\mediaserver\main_mediaserver.cpp
int main(int argc __unused, char **argv __unused)
{
    signal(SIGPIPE, SIG_IGN);

    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm(defaultServiceManager());
    ALOGI("ServiceManager: %p", sm.get());
    InitializeIcuOrDie();
    //register the services with servicemanager
    MediaPlayerService::instantiate();
    ResourceManagerService::instantiate();
    registerExtensions();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}

MediaPlayerService registration:

//frameworks\av\media\libmediaplayerservice\MediaPlayerService.cpp
void MediaPlayerService::instantiate() {
    defaultServiceManager()->addService(
            String16("media.player"), new MediaPlayerService());
}

So MediaPlayerService registers itself with servicemanager under the name "media.player".

Next, follow service->createMediaRecorder(opPackageName) into the client-side proxy BpMediaPlayerService:

//frameworks\av\media\libmedia\IMediaPlayerService.cpp
    virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
        data.writeString16(opPackageName);
        //Binder IPC
        remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
        return interface_cast<IMediaRecorder>(reply.readStrongBinder());
    }

Then into the server-side stub BnMediaPlayerService (same file):

//frameworks\av\media\libmedia\IMediaPlayerService.cpp
status_t BnMediaPlayerService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        ....
        case CREATE_MEDIA_RECORDER: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            const String16 opPackageName = data.readString16();
            sp<IMediaRecorder> recorder = createMediaRecorder(opPackageName);
            reply->writeStrongBinder(IInterface::asBinder(recorder));
            return NO_ERROR;
        } break;
        ....
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

Into MediaPlayerService, the implementation behind BnMediaPlayerService:

//frameworks\av\media\libmediaplayerservice\MediaPlayerService.cpp
sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(const String16 &opPackageName)
{
    pid_t pid = IPCThreadState::self()->getCallingPid();
    //create a MediaRecorderClient object
    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid, opPackageName);
    wp<MediaRecorderClient> w = recorder;
    Mutex::Autolock lock(mLock);
    mMediaRecorderClients.add(w);
    ALOGV("Create new media recorder client from pid %d", pid);
    return recorder;
}

So the object that createMediaRecorder(opPackageName) hands back to mediarecorder.cpp is in fact a BpMediaRecorder proxy for this server-side MediaRecorderClient (writeStrongBinder() on the server, readStrongBinder() plus interface_cast on the client).

MediaRecorderClient implements the IMediaRecorder interface (through BnMediaRecorder).

Its member variable mRecorder is created in the constructor and has type StagefrightRecorder:

//frameworks\av\media\libmediaplayerservice\MediaRecorderClient.cpp
MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid,
        const String16& opPackageName)
{
    ALOGV("Client constructor");
    mPid = pid;
    mRecorder = AVMediaServiceFactory::get()->createStagefrightRecorder(opPackageName);
    mMediaPlayerService = service;
}

createStagefrightRecorder() simply creates a StagefrightRecorder:

//AVMediaServiceFactory (QTI AVExtensions)
StagefrightRecorder *AVMediaServiceFactory::createStagefrightRecorder(
        const String16 &opPackageName) {
    return new StagefrightRecorder(opPackageName);
}

MediaRecorderClient delegates prepare() and the other recording operations to this mRecorder:

//frameworks\av\media\libmediaplayerservice\MediaRecorderClient.cpp
status_t MediaRecorderClient::prepare()
{
    ALOGV("prepare");
    Mutex::Autolock lock(mLock);
    if (mRecorder == NULL) {
        ALOGE("recorder is not initialized");
        return NO_INIT;
    }
    return mRecorder->prepare();
}

Into StagefrightRecorder.cpp:

//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp
status_t StagefrightRecorder::prepare() {
    ALOGV("prepare");
    Mutex::Autolock autolock(mLock);
    //the app configured mVideoSource as VIDEO_SOURCE_SURFACE
    if (mVideoSource == VIDEO_SOURCE_SURFACE) {
        return prepareInternal();
    }
    return OK;
}
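
mVideoSource was set earlier, when the app called setVideoSource(MediaRecorder.VideoSource.SURFACE). For reference, the setter is essentially the following (AOSP code reproduced from memory; treat it as a sketch):

//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp (sketch)
status_t StagefrightRecorder::setVideoSource(video_source vs) {
    ALOGV("setVideoSource: %d", vs);
    if (vs < VIDEO_SOURCE_DEFAULT || vs >= VIDEO_SOURCE_LIST_END) {
        ALOGE("Invalid video source: %d", vs);
        return BAD_VALUE;
    }
    //VIDEO_SOURCE_DEFAULT maps to the camera; everything else is stored as-is
    mVideoSource = (vs == VIDEO_SOURCE_DEFAULT) ? VIDEO_SOURCE_CAMERA : vs;
    return OK;
}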

prepareInternal() sets up the writer and encoder chain for the configured output format; with our settings it takes the setupMPEG4orWEBMRecording() branch:

//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp
status_t StagefrightRecorder::prepareInternal() {
    ALOGV("prepare");
    if (mOutputFd < 0) {
        ALOGE("Output file descriptor is invalid");
        return INVALID_OPERATION;
    }

    // Get UID and PID here for permission checking
    mClientUid = IPCThreadState::self()->getCallingUid();
    mClientPid = IPCThreadState::self()->getCallingPid();

    status_t status = OK;

    switch (mOutputFormat) {
        case OUTPUT_FORMAT_DEFAULT:
        case OUTPUT_FORMAT_THREE_GPP:
        case OUTPUT_FORMAT_MPEG_4:
        case OUTPUT_FORMAT_WEBM:
           //set up the MPEG4/WEBM writer and encoders
            status = setupMPEG4orWEBMRecording();
            break;

        case OUTPUT_FORMAT_AMR_NB:
        case OUTPUT_FORMAT_AMR_WB:
            status = setupAMRRecording();
            break;

        case OUTPUT_FORMAT_AAC_ADIF:
        case OUTPUT_FORMAT_AAC_ADTS:
            status = setupAACRecording();
            break;

        case OUTPUT_FORMAT_RTP_AVP:
            status = setupRTPRecording();
            break;

        case OUTPUT_FORMAT_MPEG2TS:
            status = setupMPEG2TSRecording();
            break;

        default:
            if (handleCustomRecording() != OK) {
                ALOGE("Unsupported output file format: %d", mOutputFormat);
                status = UNKNOWN_ERROR;
            }
            break;
    }

    ALOGV("Recording frameRate: %d captureFps: %f",
            mFrameRate, mCaptureFps);

    return status;
}

setupMPEG4orWEBMRecording() implementation:

//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp
status_t StagefrightRecorder::setupMPEG4orWEBMRecording() {
    mWriter.clear();
    mTotalBitRate = 0;

    status_t err = OK;
    sp<MediaWriter> writer;
    sp<MPEG4Writer> mp4writer;
    if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
        writer = new WebmWriter(mOutputFd);
    } else {
       //creates an MPEG4Writer via AVFactory::CreateMPEG4Writer
        writer = mp4writer = AVFactory::get()->CreateMPEG4Writer(mOutputFd);
    }

    if (mVideoSource < VIDEO_SOURCE_LIST_END) {
        setDefaultVideoEncoderIfNecessary();

        sp<MediaSource> mediaSource;
        //set up the media source; on the surface path this stays NULL (see the sketch after this function)
        err = setupMediaSource(&mediaSource);
        if (err != OK) {
            return err;
        }
       //create the encoder; analyzed in detail next
        sp<MediaCodecSource> encoder;
        err = setupVideoEncoder(mediaSource, &encoder);
        if (err != OK) {
            return err;
        }
       //add the encoder to the writer
        writer->addSource(encoder);
        mVideoEncoderSource = encoder;
        mTotalBitRate += mVideoBitRate;
    }

    if (mOutputFormat != OUTPUT_FORMAT_WEBM) {
        // Audio source is added at the end if it exists.
        // This help make sure that the "recoding" sound is suppressed for
        // camcorder applications in the recorded files.
        // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
        // disable audio for time lapse recording
        bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
        if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
            err = setupAudioEncoder(writer);
            if (err != OK) return err;
            mTotalBitRate += mAudioBitRate;
        }

        if (mCaptureFpsEnable) {
            mp4writer->setCaptureRate(mCaptureFps);
        }

        if (mInterleaveDurationUs > 0) {
            mp4writer->setInterleaveDuration(mInterleaveDurationUs);
        }
        if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
            mp4writer->setGeoData(mLatitudex10000, mLongitudex10000);
        }
    }
    if (mMaxFileDurationUs != 0) {
        writer->setMaxFileDuration(mMaxFileDurationUs);
    }
    if (mMaxFileSizeBytes != 0) {
        writer->setMaxFileSize(mMaxFileSizeBytes);
    }
    if (mVideoSource == VIDEO_SOURCE_DEFAULT
            || mVideoSource == VIDEO_SOURCE_CAMERA) {
        mStartTimeOffsetMs = mEncoderProfiles->getStartTimeOffsetMs(mCameraId);
    } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
        // surface source doesn't need large initial delay
        mStartTimeOffsetMs = 100;
    }
    if (mStartTimeOffsetMs > 0) {
        writer->setStartTimeOffsetMs(mStartTimeOffsetMs);
    }

    writer->setListener(mListener);
    mWriter = writer;
    return OK;
}
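
As the comment in the code above notes, setupMediaSource() leaves the source NULL on the VIDEO_SOURCE_SURFACE path; the function is essentially (AOSP code reproduced from memory; treat as a sketch):

//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp (sketch)
status_t StagefrightRecorder::setupMediaSource(sp<MediaSource> *mediaSource) {
    if (mVideoSource == VIDEO_SOURCE_DEFAULT
            || mVideoSource == VIDEO_SOURCE_CAMERA) {
        sp<CameraSource> cameraSource;
        status_t err = setupCameraSource(&cameraSource);
        if (err != OK) {
            return err;
        }
        *mediaSource = cameraSource;
    } else if (mVideoSource == VIDEO_SOURCE_SURFACE) {
        *mediaSource = NULL;   //surface input: the encoder itself acts as the source
    } else {
        return INVALID_OPERATION;
    }
    return OK;
}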

Now setupVideoEncoder():

//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp
status_t StagefrightRecorder::setupVideoEncoder(
        const sp<MediaSource> &cameraSource,
        sp<MediaCodecSource> *source) {
    source->clear();

    sp<AMessage> format = new AMessage();
   //our case should use VIDEO_ENCODER_H264 (or VIDEO_ENCODER_MPEG_4_SP? to be confirmed)
    switch (mVideoEncoder) {
        case VIDEO_ENCODER_H263:
            format->setString("mime", MEDIA_MIMETYPE_VIDEO_H263);
            break;

        case VIDEO_ENCODER_MPEG_4_SP:
            format->setString("mime", MEDIA_MIMETYPE_VIDEO_MPEG4);
            break;

        case VIDEO_ENCODER_H264:
            format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
            break;

        case VIDEO_ENCODER_VP8:
            format->setString("mime", MEDIA_MIMETYPE_VIDEO_VP8);
            break;

        case VIDEO_ENCODER_HEVC:
            format->setString("mime", MEDIA_MIMETYPE_VIDEO_HEVC);
            break;

        default:
            CHECK(!"Should not be here, unsupported video encoding.");
            break;
    }
    //cameraSource is NULL on our path
    if (cameraSource != NULL) {
        sp<MetaData> meta = cameraSource->getFormat();

        int32_t width, height, stride, sliceHeight, colorFormat;
        CHECK(meta->findInt32(kKeyWidth, &width));
        CHECK(meta->findInt32(kKeyHeight, &height));
        CHECK(meta->findInt32(kKeyStride, &stride));
        CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
        CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));

        format->setInt32("width", width);
        format->setInt32("height", height);
        format->setInt32("stride", stride);
        format->setInt32("slice-height", sliceHeight);
        format->setInt32("color-format", colorFormat);
    } else {
       //the HFR case takes this branch
        format->setInt32("width", mVideoWidth);
        format->setInt32("height", mVideoHeight);
        format->setInt32("stride", mVideoWidth);
        format->setInt32("slice-height", mVideoHeight);
        format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);

        // set up time lapse/slow motion for surface source
        if (mCaptureFpsEnable) {
            if (!(mCaptureFps > 0.)) {
                ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
                return BAD_VALUE;
            }
            format->setDouble("time-lapse-fps", mCaptureFps);
        }
    }

    setupCustomVideoEncoderParams(cameraSource, format);

    format->setInt32("bitrate", mVideoBitRate);
    format->setInt32("frame-rate", mFrameRate);
    format->setInt32("i-frame-interval", mIFramesIntervalSec);

    if (mVideoTimeScale > 0) {
        format->setInt32("time-scale", mVideoTimeScale);
    }
    if (mVideoEncoderProfile != -1) {
        format->setInt32("profile", mVideoEncoderProfile);
    }
    if (mVideoEncoderLevel != -1) {
        format->setInt32("level", mVideoEncoderLevel);
    }

    uint32_t tsLayers = 1;
    bool preferBFrames = true; // we like B-frames as it produces better quality per bitrate
    format->setInt32("priority", 0 /* realtime */);
    float maxPlaybackFps = mFrameRate; // assume video is only played back at normal speed

    if (mCaptureFpsEnable) {
        format->setFloat("operating-rate", mCaptureFps);

        // enable layering for all time lapse and high frame rate recordings
        if (mFrameRate / mCaptureFps >= 1.9) { // time lapse
            preferBFrames = false;
            tsLayers = 2; // use at least two layers as resulting video will likely be sped up
        } else if (mCaptureFps > maxPlaybackFps) { // slow-mo
            //the HFR case takes this branch
            format->setInt32("high-frame-rate", 1);
            maxPlaybackFps = mCaptureFps; // assume video will be played back at full capture speed
            preferBFrames = false;
        }
    }

    for (uint32_t tryLayers = 1; tryLayers <= kMaxNumVideoTemporalLayers; ++tryLayers) {
        if (tryLayers > tsLayers) {
            tsLayers = tryLayers;
        }
        // keep going until the base layer fps falls below the typical display refresh rate
        float baseLayerFps = maxPlaybackFps / (1 << (tryLayers - 1));
        if (baseLayerFps < kMinTypicalDisplayRefreshingRate / 0.9) {
            break;
        }
    }

    // mIFramesIntervalSec == 0 means all Intra frame, can't support P/B layers
    if (tsLayers > 1 && mIFramesIntervalSec != 0) {
        uint32_t bLayers = std::min(2u, tsLayers - 1); // use up-to 2 B-layers
        uint32_t pLayers = tsLayers - bLayers;
        format->setString(
                "ts-schema", AStringPrintf("android.generic.%u+%u", pLayers, bLayers));

        // TODO: some encoders do not support B-frames with temporal layering, and we have a
        // different preference based on use-case. We could move this into camera profiles.
        format->setInt32("android._prefer-b-frames", preferBFrames);
    }

    if (mMetaDataStoredInVideoBuffers != kMetadataBufferTypeInvalid) {
        format->setInt32("android._input-metadata-buffer-type", mMetaDataStoredInVideoBuffers);
    }

    if (mOutputFormat == OUTPUT_FORMAT_MPEG_4) {
        format->setInt32("mpeg4-writer", 1);
        format->setInt32("nal-length", 4);
    }

    uint32_t flags = 0;
    if (cameraSource == NULL) {
        //the HFR case takes this branch
        flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
    } else {
        // require dataspace setup even if not using surface input
        format->setInt32("android._using-recorder", 1);
    }
    //create the MediaCodecSource encoder object
    sp<MediaCodecSource> encoder = MediaCodecSource::Create(
            mLooper, format, cameraSource, mPersistentSurface, flags);
    if (encoder == NULL) {
        ALOGE("Failed to create video encoder");
        // When the encoder fails to be created, we need
        // release the camera source due to the camera's lock
        // and unlock mechanism.
        if (cameraSource != NULL) {
            cameraSource->stop();
        }
        return UNKNOWN_ERROR;
    }

    //with HFR, cameraSource is NULL and mGraphicBufferProducer is obtained via encoder->getGraphicBufferProducer()
    if (cameraSource == NULL) {
        mGraphicBufferProducer = encoder->getGraphicBufferProducer();
    }

    *source = encoder;

    return OK;
}

Now MediaCodecSource::Create():

//frameworks\av\media\libstagefright\MediaCodecSource.cpp
sp<MediaCodecSource> MediaCodecSource::Create(
        const sp<ALooper> &looper,
        const sp<AMessage> &format,
        const sp<MediaSource> &source,
        const sp<PersistentSurface> &persistentSurface,
        uint32_t flags) {
      //construct the MediaCodecSource
    sp<MediaCodecSource> mediaSource = new MediaCodecSource(
            looper, format, source, persistentSurface, flags);
     //read the HFR parameters into mediaSource
    AVUtils::get()->getHFRParams(&mediaSource->mIsHFR, &mediaSource->mBatchSize, format);
    //initialize mediaSource; the surface MediaRecorder hands out is created in init(), analyzed in detail below
    if (mediaSource->init() == OK) {
        return mediaSource;
    }
    return NULL;
}

And encoder->getGraphicBufferProducer():

//frameworks\av\media\libstagefright\MediaCodecSource.cpp
sp<IGraphicBufferProducer> MediaCodecSource::getGraphicBufferProducer() {
    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
    return mGraphicBufferProducer;
}

At this point it is clear that the surface returned by mMediaRecorder.getSurface() is really a Surface wrapped around the mGraphicBufferProducer member of MediaCodecSource.cpp.
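
For reference, the return path of getSurface() looks roughly like this (a sketch pieced together from AOSP sources, quoted from memory; verify against your tree):

//Java: MediaRecorder.getSurface() -> native_getSurface() (JNI), which wraps the
//IGraphicBufferProducer returned by mr->querySurfaceMediaSourceFromMediaServer()
//into a java Surface object. On the server side that query ends up in:
//frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp (sketch)
sp<IGraphicBufferProducer> StagefrightRecorder::querySurfaceMediaSource() const {
    return mGraphicBufferProducer;   //the producer obtained in setupVideoEncoder()
}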

Next, how MediaCodecSource's member mGraphicBufferProducer gets initialized. That happens in the mediaSource->init() call inside MediaCodecSource::Create():

//frameworks\av\media\libstagefright\MediaCodecSource.cpp
status_t MediaCodecSource::init() {
    //initialize the encoder
    status_t err = initEncoder();

    if (err != OK) {
        releaseEncoder();
    }

    return err;
}

Continuing into initEncoder():

//frameworks\av\media\libstagefright\MediaCodecSource.cpp
status_t MediaCodecSource::initEncoder() {
    mReflector = new AHandlerReflector<MediaCodecSource>(this);
    mLooper->registerHandler(mReflector);
    //start the codec's message-loop thread; we will come back to this when analyzing AMessage
    mCodecLooper = new ALooper;
    mCodecLooper->setName("codec_looper");
    mCodecLooper->start();

    if (mFlags & FLAG_USE_SURFACE_INPUT) {
        mOutputFormat->setInt32("create-input-buffers-suspended", 1);
    }

    AString outputMIME;
    CHECK(mOutputFormat->findString("mime", &outputMIME));

    Vector<AString> matchingCodecs;
    if (AVUtils::get()->useQCHWEncoder(mOutputFormat, &matchingCodecs)) {
        ;
    } else {
        //find the matching codecs
        MediaCodecList::findMatchingCodecs(
                outputMIME.c_str(), true /* encoder */,
                ((mFlags & FLAG_PREFER_SOFTWARE_CODEC) ? MediaCodecList::kPreferSoftwareCodecs : 0),
                &matchingCodecs);
    }

    status_t err = NO_INIT;
    for (size_t ix = 0; ix < matchingCodecs.size(); ++ix) {
        //create the MediaCodec
        mEncoder = MediaCodec::CreateByComponentName(
                mCodecLooper, matchingCodecs[ix]);

        if (mEncoder == NULL) {
            continue;
        }

        ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());

        mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
        mEncoder->setCallback(mEncoderActivityNotify);
        //configure the MediaCodec
        err = mEncoder->configure(
                    mOutputFormat,
                    NULL /* nativeWindow */,
                    NULL /* crypto */,
                    MediaCodec::CONFIGURE_FLAG_ENCODE);

        if (err == OK) {
            break;
        }
        mEncoder->release();
        mEncoder = NULL;
    }

    if (err != OK) {
        return err;
    }

    mEncoder->getOutputFormat(&mOutputFormat);
    sp<MetaData> meta = new MetaData;
    convertMessageToMetaData(mOutputFormat, meta);
    mMeta.lock().set(meta);

    if (mFlags & FLAG_USE_SURFACE_INPUT) {
        CHECK(mIsVideo);

        if (mPersistentSurface != NULL) {
            // When using persistent surface, we are only interested in the
            // consumer, but have to use PersistentSurface as a wrapper to
            // pass consumer over messages (similar to BufferProducerWrapper)
            err = mEncoder->setInputSurface(mPersistentSurface);
        } else {
            //at last: the call that creates the surface
            err = mEncoder->createInputSurface(&mGraphicBufferProducer);
        }

        if (err != OK) {
            return err;
        }
    }

    sp<AMessage> inputFormat;
    int32_t usingSwReadOften;
    mSetEncoderFormat = false;
    if (mEncoder->getInputFormat(&inputFormat) == OK) {
        mSetEncoderFormat = true;
        if (inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
                && usingSwReadOften) {
            // this is a SW encoder; signal source to allocate SW readable buffers
            mEncoderFormat = kDefaultSwVideoEncoderFormat;
        } else {
            mEncoderFormat = kDefaultHwVideoEncoderFormat;
        }
        if (!inputFormat->findInt32("android._dataspace", &mEncoderDataSpace)) {
            mEncoderDataSpace = kDefaultVideoEncoderDataSpace;
        }
        ALOGV("setting dataspace %#x, format %#x", mEncoderDataSpace, mEncoderFormat);
    }
    //start the MediaCodec
    err = mEncoder->start();

    if (err != OK) {
        return err;
    }

    {
        Mutexed<Output>::Locked output(mOutput);
        output->mEncoderReachedEOS = false;
        output->mErrorCode = OK;
    }

    return OK;
}
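
Aside: the mPersistentSurface branch above is the path taken when the app creates a persistent input surface up front and hands it over with MediaRecorder.setInputSurface(). On the native side that surface comes from a static MediaCodec helper; a hedged sketch of the alternative (the API name exists in AOSP, the usage shown is an assumption):

//persistent-surface alternative (sketch; not the HFR path analyzed in this article)
sp<PersistentSurface> persistentSurface = MediaCodec::CreatePersistentInputSurface();
//... later, initEncoder() runs mEncoder->setInputSurface(mPersistentSurface)
//instead of mEncoder->createInputSurface(&mGraphicBufferProducer)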

Continuing with the surface creation:

//frameworks\av\media\libstagefright\MediaCodec.cpp
status_t MediaCodec::createInputSurface(
        sp<IGraphicBufferProducer>* bufferProducer) {
    sp<AMessage> msg = new AMessage(kWhatCreateInputSurface, this);

    sp<AMessage> response;
    //the surface is created by posting a kWhatCreateInputSurface message;
    //this drags in the whole AMessage send/receive machinery, analyzed in the next section
    status_t err = PostAndAwaitResponse(msg, &response);
    if (err == NO_ERROR) {
        // unwrap the sp<IGraphicBufferProducer>
        sp<RefBase> obj;
        bool found = response->findObject("input-surface", &obj);
        CHECK(found);
        sp<BufferProducerWrapper> wrapper(
                static_cast<BufferProducerWrapper*>(obj.get()));
        *bufferProducer = wrapper->getBufferProducer();
    } else {
        ALOGW("createInputSurface failed, err=%d", err);
    }
    return err;
}

Next, the AMessage mechanism.

Startup:

//frameworks\av\media\libstagefright\MediaCodecSource.cpp
status_t MediaCodecSource::initEncoder() {
    mReflector = new AHandlerReflector<MediaCodecSource>(this);
    mLooper->registerHandler(mReflector);
    //create the ALooper (message-loop) thread
    mCodecLooper = new ALooper;
    mCodecLooper->setName("codec_looper");
    mCodecLooper->start();

   ...

    status_t err = NO_INIT;
    for (size_t ix = 0; ix < matchingCodecs.size(); ++ix) {
    //pass the mCodecLooper created above to the MediaCodec about to be created
        mEncoder = MediaCodec::CreateByComponentName(
                mCodecLooper, matchingCodecs[ix]);

        ....
    return OK;
}
// static
sp<MediaCodec> MediaCodec::CreateByComponentName(
        const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid, uid_t uid) {
    VTRACE_CALL();
    //the looper handed in from MediaCodecSource is stored by the MediaCodec constructor
    sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
    //enter MediaCodec::init()
    const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
    if (err != NULL) {
        *err = ret;
    }
    return ret == OK ? codec : NULL; // NULL deallocates codec.
}

Note that MediaCodec itself inherits from AHandler. Into init():

status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
    ...
    // creates the ACodec
    mCodec = GetCodecBase(name, nameIsType);
    if (mIsVideo) {
        // video codec needs dedicated looper
        if (mCodecLooper == NULL) {
            mCodecLooper = new ALooper;
            mCodecLooper->setName("CodecLooper");
            mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
        }

        mCodecLooper->registerHandler(mCodec);
    } else {
      //register the ACodec handler on mLooper; this hands the handler its looper and id
        mLooper->registerHandler(mCodec);
    }
    
    mLooper->registerHandler(this);

    ...
    return err;
}

Continuing with registerHandler():

//frameworks\av\media\libstagefright\foundation\ALooper.cpp
ALooper::handler_id ALooper::registerHandler(const sp<AHandler> &handler) {
    return gLooperRoster.registerHandler(this, handler);
}

Which calls into ALooperRoster::registerHandler():

//frameworks\av\media\libstagefright\foundation\ALooperRoster.cpp
ALooper::handler_id ALooperRoster::registerHandler(
        const sp<ALooper> &looper, const sp<AHandler> &handler) {
    Mutex::Autolock autoLock(mLock);

    if (handler->id() != 0) {
        CHECK(!"A handler must only be registered once.");
        return INVALID_OPERATION;
    }

    HandlerInfo info;
    info.mLooper = looper;
    info.mHandler = handler;
    ALooper::handler_id handlerID = mNextHandlerID++;
    mHandlers.add(handlerID, info);

    handler->setID(handlerID, looper);

    return handlerID;
}

Then handler->setID():

//frameworks\av\media\libstagefright\foundation\include\media\stagefright\foundation\AHandler.h
    inline void setID(ALooper::handler_id id, const wp<ALooper> &looper) {
        mID = id;
        mLooper = looper;
    }

Now AMessage itself.
Construction:

//frameworks\av\media\libstagefright\foundation\AMessage.cpp
AMessage::AMessage(uint32_t what, const sp<const AHandler> &handler)
    : mWhat(what),
      mNumItems(0) {
    setTarget(handler);
}

And setTarget():

void AMessage::setTarget(const sp<const AHandler> &handler) {
    if (handler == NULL) {
        mTarget = 0;
        mHandler.clear();
        mLooper.clear();
    } else {
        mTarget = handler->id();
        mHandler = handler->getHandler();
        mLooper = handler->getLooper();
    }
}

So a message ultimately gets its mLooper and target id from the handler it is constructed with.

Next, message posting:

status_t AMessage::postAndAwaitResponse(sp<AMessage> *response) {
    sp<ALooper> looper = mLooper.promote();
    if (looper == NULL) {
        ALOGW("failed to post message as target looper for handler %d is gone.", mTarget);
        return -ENOENT;
    }

    sp<AReplyToken> token = looper->createReplyToken();
    if (token == NULL) {
        ALOGE("failed to create reply token");
        return -ENOMEM;
    }
    setObject("replyID", token);
    //post the message to the ALooper
    looper->post(this, 0 /* delayUs */);
    //block until the ALooper delivers the reply
    return looper->awaitResponse(token, response);
}
void ALooper::post(const sp<AMessage> &msg, int64_t delayUs) {
    Mutex::Autolock autoLock(mLock);

    int64_t whenUs;
    if (delayUs > 0) {
        whenUs = GetNowUs() + delayUs;
    } else {
        whenUs = GetNowUs();
    }

    List<Event>::iterator it = mEventQueue.begin();
    while (it != mEventQueue.end() && (*it).mWhenUs <= whenUs) {
        ++it;
    }
    //wrap the msg in an Event
    Event event;
    event.mWhenUs = whenUs;
    event.mMessage = msg;

    if (it == mEventQueue.begin()) {
        mQueueChangedCondition.signal();
    }
    //insert into the event queue
    mEventQueue.insert(it, event);
}
//frameworks\av\media\libstagefright\foundation\ALooper.cpp
bool ALooper::loop() {
    Event event;

    {
     ...
        //take the next due event off the queue

        event = *mEventQueue.begin();
        mEventQueue.erase(mEventQueue.begin());
    }
    //deliver the message to its target handler
    event.mMessage->deliver();

    // NOTE: It's important to note that at this point our "ALooper" object
    // may no longer exist (its final reference may have gone away while
    // delivering the message). We have made sure, however, that loop()
    // won't be called again.

    return true;
}

Continuing with deliver():

//\frameworks\av\media\libstagefright\foundation\AMessage.cpp
void AMessage::deliver() {
    sp<AHandler> handler = mHandler.promote();
    if (handler == NULL) {
        ALOGW("failed to deliver message as target handler %d is gone.", mTarget);
        return;
    }

    handler->deliverMessage(this);
}

And deliverMessage():

//frameworks\av\media\libstagefright\foundation\AHandler.cpp
void AHandler::deliverMessage(const sp<AMessage> &msg) {
    onMessageReceived(msg);
   ...
}
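
Putting the pieces together: a handler registers with a looper, a message posts itself onto the handler's looper, the looper thread delivers it back into onMessageReceived(), and PostAndAwaitResponse-style calls block on a reply token. A minimal self-contained sketch of the pattern (MyHandler and example() are hypothetical, written only to illustrate the flow):

#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

//hypothetical handler, for illustration only
struct MyHandler : public AHandler {
    enum { kWhatPing = 'ping' };
protected:
    void onMessageReceived(const sp<AMessage> &msg) override {
        if (msg->what() == kWhatPing) {
            //grab the reply token so the sender's postAndAwaitResponse() unblocks
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            sp<AMessage> response = new AMessage;
            response->setInt32("err", OK);
            response->postReply(replyID);
        }
    }
};

void example() {
    sp<ALooper> looper = new ALooper;
    looper->setName("example_looper");
    looper->start();                      //spins up the event-loop thread

    sp<MyHandler> handler = new MyHandler;
    looper->registerHandler(handler);     //assigns the handler its id and looper

    sp<AMessage> msg = new AMessage(MyHandler::kWhatPing, handler);
    sp<AMessage> response;
    msg->postAndAwaitResponse(&response); //post + block on the reply token,
                                          //the same mechanism MediaCodec uses
}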

Back in MediaCodec, the message arrives in onMessageReceived():

//frameworks\av\media\libstagefright\MediaCodec.cpp
void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
    VTRACE_METHOD();
    switch (msg->what()) {
        ...
        case kWhatCreateInputSurface:
        case kWhatSetInputSurface:
        {
         .....
            mReplyID = replyID;
            if (msg->what() == kWhatCreateInputSurface) {
               //forward to the ACodec: initiateCreateInputSurface() posts kWhatCreateInputSurface
                mCodec->initiateCreateInputSurface();
            } 
            break;
        }
      
        ....
        default:
            TRESPASS();
    }
}

This eventually lands in ACodec.cpp:

//frameworks\av\media\libstagefright\ACodec.cpp
bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
    bool handled = false;

    switch (msg->what()) {
       ....

        case ACodec::kWhatCreateInputSurface:
        {
            onCreateInputSurface(msg);
            handled = true;
            break;
        }

    ...

    return handled;
}

Into the surface-creation call itself:

//frameworks\av\media\libstagefright\ACodec.cpp
void ACodec::LoadedState::onCreateInputSurface(
        const sp<AMessage> & /* msg */) {
    ALOGV("onCreateInputSurface");
    //create the surface
    sp<IGraphicBufferProducer> bufferProducer;
    //mOMX has type sp<IOMX>; how it was created is analyzed next
    status_t err = mCodec->mOMX->createInputSurface(
            &bufferProducer, &mCodec->mGraphicBufferSource);

    ...
}

How mOMX is created:

//frameworks\av\media\libstagefright\ACodec.cpp
bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
    ALOGV("onAllocateComponent");
   ....

    sp<CodecObserver> observer = new CodecObserver;
    sp<IOMX> omx;
    sp<IOMXNode> omxNode;

    status_t err = NAME_NOT_FOUND;
    for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
            ++matchIndex) {
        componentName = matchingCodecs[matchIndex];

        OMXClient client;
        bool trebleFlag;
        //connect to the codec service to obtain the IOMX object
        if (client.connect(owners[matchIndex].c_str(), &trebleFlag) != OK) {
            mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
            return false;
        }
        //omx is obtained from the connected client
        omx = client.interface();

    ...
    mCodec->mOMX = omx;
    mCodec->mOMXNode = omxNode;
    mCodec->mCallback->onComponentAllocated(mCodec->mComponentName.c_str());
    mCodec->changeState(mCodec->mLoadedState);

    return true;
}

Next, OMXClient::connect(), which produces the IOMX object:

//frameworks\av\media\libstagefright\OMXClient.cpp
status_t OMXClient::connect(const char* name, bool* trebleFlag) {
    if (property_get_bool("persist.media.treble_omx", true)) {
        if (trebleFlag != nullptr) {
            *trebleFlag = true;
        }
        return connectTreble(name);
    }
    if (trebleFlag != nullptr) {
        *trebleFlag = false;
    }
    //on this device the legacy path below is taken
    return connectLegacy();
}
status_t OMXClient::connectLegacy() {
    //obtain the media.codec service proxy
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> codecbinder = sm->getService(String16("media.codec"));
    sp<IMediaCodecService> codecservice = interface_cast<IMediaCodecService>(codecbinder);

    if (codecservice.get() == NULL) {
        ALOGE("Cannot obtain IMediaCodecService");
        return NO_INIT;
    }
    //obtain the mOMX object through codecservice
    mOMX = codecservice->getOMX();
    if (mOMX.get() == NULL) {
        ALOGE("Cannot obtain mediacodec IOMX");
        return NO_INIT;
    }

    return OK;
}

Into the media.codec service.

rc file:

//frameworks\av\services\mediacodec\[email protected]
service mediacodec /vendor/bin/hw/android.hardware.media.omx@1.0-service
    class main
    user mediacodec
    group camera drmrpc mediadrm audio
    ioprio rt 4
    writepid /dev/cpuset/foreground/tasks

Process entry point:

//frameworks\av\services\mediacodec\main_codecservice.cpp

int main(int argc __unused, char** argv)
{
    LOG(INFO) << "mediacodecservice starting";
    ....
    if (treble) {
    ...
    } else {
        MediaCodecService::instantiate();
        LOG(INFO) << "Non-Treble OMX service created.";
    }

    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}

getOMX() implementation:

//frameworks\av\services\mediacodec\MediaCodecService.cpp
sp<IOMX> MediaCodecService::getOMX() {

    Mutex::Autolock autoLock(mOMXLock);

    if (mOMX.get() == NULL) {
        //create the OMX object on first use
        mOMX = new OMX();
    }
    //return the (cached) OMX object
    return mOMX;
}

Having seen how OMX is created, we can now finish the surface story with OMX::createInputSurface():

//frameworks\av\media\libstagefright\omx\OMX.cpp
status_t OMX::createInputSurface(
        sp<IGraphicBufferProducer> *bufferProducer,
        sp<IGraphicBufferSource> *bufferSource) {
    if (bufferProducer == NULL || bufferSource == NULL) {
        ALOGE("b/25884056");
        return BAD_VALUE;
    }
   //create a GraphicBufferSource; the surface is created inside its constructor
    sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
    status_t err = graphicBufferSource->initCheck();
    if (err != OK) {
        ALOGE("Failed to create persistent input surface: %s (%d)",
                strerror(-err), err);
        return err;
    }

    *bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
    *bufferSource = new BWGraphicBufferSource(graphicBufferSource);

    return OK;
}
//frameworks\av\media\libstagefright\omx\GraphicBufferSource.cpp
GraphicBufferSource::GraphicBufferSource() {
    ALOGV("GraphicBufferSource");

    String8 name("GraphicBufferSource");
    //create the BufferQueue: at long last, this is where the surface actually comes from
    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
    mConsumer->setConsumerName(name);

    // Note that we can't create an sp<...>(this) in a ctor that will not keep a
    // reference once the ctor ends, as that would cause the refcount of 'this'
    // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
    // that's what we create.
    wp<BufferQueue::ConsumerListener> listener =
            static_cast<BufferQueue::ConsumerListener*>(this);
    sp<IConsumerListener> proxy =
            new BufferQueue::ProxyConsumerListener(listener);

    mInitCheck = mConsumer->consumerConnect(proxy, false);
 ... 
}

That completes the analysis of how MediaRecorder creates its internal surface.
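
As a closing illustration: an IGraphicBufferProducer becomes an app-facing Surface simply by being wrapped, which is what the JNI helper android_view_Surface_createFromIGraphicBufferProducer() does when MediaRecorder.getSurface() returns. A standalone sketch (not MediaRecorder code):

#include <gui/BufferQueue.h>
#include <gui/Surface.h>

using namespace android;

void sketch() {
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    //same call GraphicBufferSource makes: a producer/consumer pair sharing one BufferQueue
    BufferQueue::createBufferQueue(&producer, &consumer);

    //wrapping the producer yields the Surface the camera can render into;
    //the encoder consumes buffers from the consumer end
    sp<Surface> surface = new Surface(producer);
}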
