Analysis of the WebRTC flow after receiving an offer, and of the data path from the jitter buffer to the decoder

----------------------------------------------------------------------------------------------------------------------------------------

Set up an rtmpd server in one minute: https://blog.csdn.net/freeabc/article/details/102880984

Software download: http://www.qiyicc.com/download/rtmpd.rar

GitHub: https://github.com/superconvert/smart_rtmpd

-----------------------------------------------------------------------------------------------------------------------------------------


//*************************************************************************************************
//
// Flow analysis on the receiving side after the peer's offer arrives
//
//*************************************************************************************************
1. The offer is received from the peer (WebSocket signaling)
WebSocketRTCClient.java

void WebSocketRTCClient::onWebSocketMessage(final String msg)
    // the offer arrives here
    if (type.equals("offer")) {
        if (!initiator) {
            SessionDescription sdp = new SessionDescription(
                SessionDescription.Type.fromCanonicalForm(type), json.getString("sdp"));
            events.onRemoteDescription(sdp);
        } else {
            reportError("Received offer for call receiver: " + msg);
        }
    }

2. The callback then reaches this function
CallActivity.java

void CallActivity::onRemoteDescription(final SessionDescription sdp)
    // see step 3 below for the analysis of this call
    peerConnectionClient.setRemoteDescription(sdp);
    if (!signalingParameters.initiator) {        
        // see step 4 below for the analysis of this call
        peerConnectionClient.createAnswer();
    }

    
3. Flow of peerConnectionClient.setRemoteDescription: it mainly creates the transport objects and every
component on the pipeline ( socket --> jitterbuffer --> decoder --> render ), and manages how these components relate to one another
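
Before walking the call chain, here is a rough conceptual sketch of that receive pipeline, using placeholder types of my own rather than the real WebRTC classes, just to make the stage-to-stage hand-off concrete:

    // Conceptual sketch only -- placeholder types, not WebRTC classes.
    #include <cstdint>
    #include <vector>

    using Buffer = std::vector<uint8_t>;

    struct Renderer {
        void OnDecodedFrame(const Buffer& yuv) { /* draw the frame */ }
    };
    struct Decoder {
        Renderer* renderer = nullptr;
        void OnEncodedFrame(const Buffer& frame) { /* decode */ if (renderer) renderer->OnDecodedFrame(frame); }
    };
    struct JitterBuffer {
        Decoder* decoder = nullptr;
        void OnRtpPacket(const Buffer& pkt) { /* reorder, assemble a frame */ if (decoder) decoder->OnEncodedFrame(pkt); }
    };
    struct Socket {
        JitterBuffer* jitter_buffer = nullptr;
        void OnData(const Buffer& pkt) { if (jitter_buffer) jitter_buffer->OnRtpPacket(pkt); }
    };

    // setRemoteDescription is, in effect, what wires socket --> jitterbuffer --> decoder --> render:
    inline Socket BuildReceivePipeline(JitterBuffer& jb, Decoder& dec, Renderer& render) {
        dec.renderer = &render;
        jb.decoder = &dec;
        Socket s;
        s.jitter_buffer = &jb;
        return s;
    }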
    // Java    

void PeerConnectionClient::setRemoteDescription(final SessionDescription sdp)
        peerConnection.setRemoteDescription(sdpObserver, sdpRemote);

   // Java

 PeerConnection::setRemoteDescription(SdpObserver observer, SessionDescription sdp)
        nativeSetRemoteDescription(observer, sdp);

   // JNI C++

    JNI_GENERATOR_EXPORT void Java_org_webrtc_PeerConnection_nativeSetRemoteDescription(
        JNIEnv* env,
        jobject jcaller,
        jobject observer,
        jobject sdp) {
        return JNI_PeerConnection_SetRemoteDescription(env, base::android::JavaParamRef<jobject>(env,
            jcaller), base::android::JavaParamRef<jobject>(env, observer),
            base::android::JavaParamRef<jobject>(env, sdp));
    }

     3.1
    ./sdk/android/src/jni/pc/peer_connection.cc    

static void JNI_PeerConnection_SetRemoteDescription(
        JNIEnv* jni,
        const JavaParamRef<jobject>& j_pc,
        const JavaParamRef<jobject>& j_observer,
        const JavaParamRef<jobject>& j_sdp) {
        rtc::scoped_refptr<SetSdpObserverJni> observer(
            new rtc::RefCountedObject<SetSdpObserverJni>(jni, j_observer, nullptr));
        ExtractNativePC(jni, j_pc)->SetRemoteDescription(
            observer, JavaToNativeSessionDescription(jni, j_sdp).release());
    }

     3.2
    ./pc/peer_connection.cc    

 void PeerConnection::SetRemoteDescription(
        SetSessionDescriptionObserver* observer,
        SessionDescriptionInterface* desc_ptr)
        this_weak_ptr->DoSetRemoteDescription(
            std::move(desc),
            rtc::scoped_refptr<SetRemoteDescriptionObserverInterface>(
                new SetRemoteDescriptionObserverAdapter(this_weak_ptr.get(), std::move(observer_refptr))));

    3.3
    ./pc/peer_connection.cc    

void PeerConnection::DoSetRemoteDescription(
        std::unique_ptr<SessionDescriptionInterface> desc,
        rtc::scoped_refptr<SetRemoteDescriptionObserverInterface> observer)
        
        error = ApplyRemoteDescription(std::move(desc));
        observer->OnSetRemoteDescriptionComplete(RTCError::OK());

    3.4
    ./pc/peer_connection.cc  

 RTCError PeerConnection::ApplyRemoteDescription(
        std::unique_ptr<SessionDescriptionInterface> desc)
        
        // see step 3.4.1
        RTCError error = PushdownTransportDescription(cricket::CS_REMOTE, type);
        // see step 3.4.2
        error = CreateChannels(*remote_description()->description());
        // see step 3.4.3
        error = UpdateSessionState(type, cricket::CS_REMOTE, remote_description()->description());

       3.4.1 PushdownTransportDescription       

RTCError PeerConnection::PushdownTransportDescription(
            cricket::ContentSource source, SdpType type) {
            if (source == cricket::CS_LOCAL) {
                const SessionDescriptionInterface* sdesc = local_description();
                RTC_DCHECK(sdesc);
                return transport_controller_->SetLocalDescription(type, sdesc->description());
            } else {
                // this is the branch taken for the remote description
                const SessionDescriptionInterface* sdesc = remote_description();
                RTC_DCHECK(sdesc);
                return transport_controller_->SetRemoteDescription(type, sdesc->description());
            }
        }

          ./pc/jsep_transport_controller.cc

RTCError JsepTransportController::SetRemoteDescription(
            SdpType type, const cricket::SessionDescription* description)
            return ApplyDescription_n(/*local=*/false, type, description);

          ./pc/jsep_transport_controller.cc

        RTCError JsepTransportController::ApplyDescription_n(
            bool local, SdpType type, const cricket::SessionDescription* description)
            
            for (const cricket::ContentInfo& content_info : description->contents()) {
                // Don't create transports for rejected m-lines and bundled m-lines.
                if (content_info.rejected ||
                    (IsBundled(content_info.name) && content_info.name != *bundled_mid())) {
                    continue;
                }
                // See the analysis of JsepTransportController::MaybeCreateJsepTransport in
                // https://blog.csdn.net/freeabc/article/details/106287318 -- it essentially creates
                // the underlying transport object and wires up the relationships between the transports
                error = MaybeCreateJsepTransport(local, content_info, *description);
                if (!error.ok()) {
                    return error;
                }
            }
            
            for (size_t i = 0; i < description->contents().size(); ++i) {                
                SetIceRole_n(DetermineIceRole(transport, transport_info, type, local));
                transport->SetRemoteJsepTransportDescription(jsep_description, type);
            }

        3.4.2 CreateChannels creates the audio and video channels
        // see the post https://blog.csdn.net/freeabc/article/details/106287318

RTCError PeerConnection::CreateChannels(const SessionDescription& desc)
            // this just creates a VideoChannel; the VideoChannel's media_channel is the WebRtcVideoChannel
            cricket::VideoChannel* video_channel = CreateVideoChannel(video->name);
            if (!video_channel) {
                LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, "Failed to create video channel.");
            }
            // bind the RtpTransceiver to the VideoChannel
            GetVideoTransceiver()->internal()->SetChannel(video_channel);

        3.4.3 UpdateSessionState mainly updates the channels and links the pipeline components together
        // see the post https://blog.csdn.net/freeabc/article/details/106287318

RTCError PeerConnection::UpdateSessionState(SdpType type, cricket::ContentSource source, 
            const cricket::SessionDescription* description)
            error = PushdownMediaDescription(type, source);

 

 RTCError PeerConnection::PushdownMediaDescription(SdpType type, cricket::ContentSource source)
            for (const auto& transceiver : transceivers_) {
                const ContentInfo* content_info = FindMediaSectionForTransceiver(transceiver, sdesc);
                cricket::ChannelInterface* channel = transceiver->internal()->channel();
                const MediaContentDescription* content_desc = content_info->media_description();
                bool success = (source == cricket::CS_LOCAL)
                    ? channel->SetLocalContent(content_desc, type, &error)
                    : channel->SetRemoteContent(content_desc, type, &error);

        ./pc/channel.cc         

bool BaseChannel::SetRemoteContent(const MediaContentDescription* content,
            SdpType type, std::string* error_desc) {
            TRACE_EVENT0("webrtc", "BaseChannel::SetRemoteContent");
            return InvokeOnWorker<bool>(RTC_FROM_HERE,
                Bind(&BaseChannel::SetRemoteContent_w, this, content, type, error_desc));
        }

        ./pc/channel.cc 

 bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
            SdpType type, std::string* error_desc)
            if (!UpdateRemoteStreams_w(video->streams(), type, error_desc)) {
                SafeSetError("Failed to set remote video description streams.", error_desc);
                return false;
            }

        ./pc/channel.cc 

        bool BaseChannel::UpdateRemoteStreams_w(const std::vector<StreamParams>& streams,
            SdpType type, std::string* error_desc)
            
            for (const StreamParams& new_stream : streams) {
                // We allow a StreamParams with an empty list of SSRCs, in which case the
                // MediaChannel will cache the parameters and use them for any unsignaled
                // stream received later.
                if ((!new_stream.has_ssrcs() && !HasStreamWithNoSsrcs(remote_streams_)) ||
                    !GetStreamBySsrc(remote_streams_, new_stream.first_ssrc())) {
                    // see step 3.4.3.1
                    if (AddRecvStream_w(new_stream)) {
                    }
                }
            }
            // see step 3.4.3.2: this registers the channel so that RTP packets received by the lower layer are routed into it
            RegisterRtpDemuxerSink();
            remote_streams_ = streams;

         3.4.3.1
        ./pc/channel.cc        

bool BaseChannel::AddRecvStream_w(const StreamParams& sp) {
            RTC_DCHECK(worker_thread() == rtc::Thread::Current());
            // media_channel is the WebRtcVideoChannel object
            // see step 22 in the post
            // https://blog.csdn.net/freeabc/article/details/106287318
            return media_channel()->AddRecvStream(sp);
        }

        ./media/engine/webrtc_video_engine.cc

bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp) {
            return AddRecvStream(sp, false);
        }

        ./media/engine/webrtc_video_engine.cc       

 bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp,
            bool default_stream)
            
            receive_streams_[ssrc] = new WebRtcVideoReceiveStream(
                this, call_, sp, std::move(config), decoder_factory_, default_stream,
                recv_codecs_, flexfec_config);

            For a deeper analysis of WebRtcVideoReceiveStream, see the second part below:
            <<Flow of received video data from the jitter buffer to the decoder>>

        At this point the receive_streams_ entries of WebRtcVideoChannel exist; each entry is a WebRtcVideoReceiveStream.
        
        3.4.3.2
        See step 10.3.3 in the post https://blog.csdn.net/freeabc/article/details/106142951.
        The key point: the VideoChannel is added as a Sink to the rtp_demuxer_ inside RtpTransport, so RtpDemuxer::OnRtpPacket
        ends up calling VideoChannel::OnRtpPacket; this is what links the components of the
        pipeline ( socket ---> jitterbuffer ---> decoder ---> render ) together, as sketched below.
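
        A minimal sketch of that SSRC-based demultiplexing, using simplified stand-in types rather than
        the real RtpDemuxer / RtpPacketSinkInterface declarations:

        #include <cstdint>
        #include <map>

        struct RtpPacket {
            uint32_t ssrc;
            // header, extensions, payload ...
        };

        // Stand-in for the sink role that VideoChannel plays.
        class PacketSink {
         public:
            virtual ~PacketSink() = default;
            virtual void OnRtpPacket(const RtpPacket& packet) = 0;
        };

        // Stand-in for the demuxer held by RtpTransport: RegisterRtpDemuxerSink amounts to AddSinkBySsrc,
        // and every packet coming up from the socket is routed to the channel that owns its SSRC.
        class SimpleRtpDemuxer {
         public:
            void AddSinkBySsrc(uint32_t ssrc, PacketSink* sink) { sinks_[ssrc] = sink; }
            void OnRtpPacket(const RtpPacket& packet) {
                auto it = sinks_.find(packet.ssrc);
                if (it != sinks_.end())
                    it->second->OnRtpPacket(packet);  // e.g. VideoChannel::OnRtpPacket
            }
         private:
            std::map<uint32_t, PacketSink*> sinks_;
        };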
        

4. Flow of peerConnectionClient.createAnswer

void PeerConnectionClient::createAnswer() {
        peerConnection.createAnswer(sdpObserver, sdpMediaConstraints);
    }

 

   void PeerConnection::createAnswer(SdpObserver observer, MediaConstraints constraints) {
        nativeCreateAnswer(observer, constraints);
    }
    JNI_GENERATOR_EXPORT void Java_org_webrtc_PeerConnection_nativeCreateAnswer(
        JNIEnv* env,
        jobject jcaller,
        jobject observer,
        jobject constraints) {
        return JNI_PeerConnection_CreateAnswer(env, base::android::JavaParamRef<jobject>(env, jcaller),
            base::android::JavaParamRef<jobject>(env, observer), base::android::JavaParamRef<jobject>(env, constraints));
    }


    ./sdk/android/src/jni/pc/peer_connection.cc

 void JNI_PeerConnection_CreateAnswer(
        JNIEnv* jni,
        const JavaParamRef<jobject>& j_pc,
        const JavaParamRef<jobject>& j_observer,
        const JavaParamRef<jobject>& j_constraints) {
        std::unique_ptr<MediaConstraints> constraints =
            JavaToNativeMediaConstraints(jni, j_constraints);
        rtc::scoped_refptr<CreateSdpObserverJni> observer(
            new rtc::RefCountedObject<CreateSdpObserverJni>(jni, j_observer, std::move(constraints)));
        PeerConnectionInterface::RTCOfferAnswerOptions options;
        CopyConstraintsIntoOfferAnswerOptions(observer->constraints(), &options);
        ExtractNativePC(jni, j_pc)->CreateAnswer(observer, options);
    }


    ./pc/peer_connection.cc

void PeerConnection::CreateAnswer(CreateSessionDescriptionObserver* observer,
        const RTCOfferAnswerOptions& options)
        this_weak_ptr->DoCreateAnswer(options, observer_wrapper);


    ./pc/peer_connection.cc

void PeerConnection::DoCreateAnswer(
        const RTCOfferAnswerOptions& options,
        rtc::scoped_refptr<CreateSessionDescriptionObserver> observer)
        
        webrtc_session_desc_factory_->CreateAnswer(observer, session_options);


     ./pc/webrtc_session_description_factory.cc

void WebRtcSessionDescriptionFactory::CreateAnswer(
        CreateSessionDescriptionObserver* observer,
        const cricket::MediaSessionOptions& session_options)
        InternalCreateAnswer(request);


    ./pc/webrtc_session_description_factory.cc

void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
        CreateSessionDescriptionRequest request)        
        PostCreateSessionDescriptionSucceeded(request.observer, std::move(answer));

        
    // Generates the answer SDP and notifies the upper layer so it can do the corresponding preparation.
    // For the follow-up flow see https://blog.csdn.net/freeabc/article/details/106287318
    // That flow is mostly initialization: the network plus the mid-layer channel, receiver and sender objects are created there.
    
//*************************************************************************************************
//
// Flow of received video data from the jitter buffer to the decoder
//
//*************************************************************************************************
1. Start with the WebRtcVideoReceiveStream constructor

WebRtcVideoChannel::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
    WebRtcVideoChannel* channel,
    webrtc::Call* call,
    const StreamParams& sp,
    webrtc::VideoReceiveStream::Config config,
    webrtc::VideoDecoderFactory* decoder_factory,
    bool default_stream,
    const std::vector<VideoCodecSettings>& recv_codecs,
    const webrtc::FlexfecReceiveStream::Config& flexfec_config)
    : channel_(channel),
      call_(call),
      stream_params_(sp),
      stream_(NULL),
      default_stream_(default_stream),
      config_(std::move(config)),
      flexfec_config_(flexfec_config),
      flexfec_stream_(nullptr),
      decoder_factory_(decoder_factory),
      sink_(NULL),
      first_frame_timestamp_(-1),
      estimated_remote_start_ntp_time_ms_(0) {
  config_.renderer = this;
  ConfigureCodecs(recv_codecs);
  ConfigureFlexfecCodec(flexfec_config.payload_type);
  MaybeRecreateWebRtcFlexfecStream();
  // see step 2 below
  RecreateWebRtcVideoStream();
}

2.
./media/engine/webrtc_video_engine.cc

void WebRtcVideoChannel::WebRtcVideoReceiveStream::RecreateWebRtcVideoStream() {
    absl::optional<int> base_minimum_playout_delay_ms;
    if (stream_) {
        base_minimum_playout_delay_ms = stream_->GetBaseMinimumPlayoutDelayMs();
        MaybeDissociateFlexfecFromVideo();
        call_->DestroyVideoReceiveStream(stream_);
        stream_ = nullptr;
    }
    webrtc::VideoReceiveStream::Config config = config_.Copy();
    config.rtp.protected_by_flexfec = (flexfec_stream_ != nullptr);
    config.stream_id = stream_params_.id;
    // a VideoReceiveStream object is created here, see step 3 below
    stream_ = call_->CreateVideoReceiveStream(std::move(config));
    if (base_minimum_playout_delay_ms) {
        stream_->SetBaseMinimumPlayoutDelayMs(base_minimum_playout_delay_ms.value());
    }
    MaybeAssociateFlexfecWithVideo();
    // see step 4 below
    stream_->Start();

    if (webrtc::field_trial::IsEnabled(
        "WebRTC-Video-BufferPacketsWithUnknownSsrc")) {
        channel_->BackfillBufferedPackets(stream_params_.ssrcs);
    }
}

3. This ultimately produces a VideoReceiveStream object
./call/call.cc

webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream(
    webrtc::VideoReceiveStream::Config configuration) {
    TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream");
    RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);

    receive_side_cc_.SetSendPeriodicFeedback(
        SendPeriodicFeedback(configuration.rtp.extensions));

    RegisterRateObserver();

    VideoReceiveStream* receive_stream = new VideoReceiveStream(
        task_queue_factory_, &video_receiver_controller_, num_cpu_cores_,
        transport_send_ptr_->packet_router(), std::move(configuration),
        module_process_thread_.get(), call_stats_.get(), clock_);

    const webrtc::VideoReceiveStream::Config& config = receive_stream->config();
    {
        WriteLockScoped write_lock(*receive_crit_);
        if (config.rtp.rtx_ssrc) {
            // We record identical config for the rtx stream as for the main
            // stream. Since the transport_send_cc negotiation is per payload
            // type, we may get an incorrect value for the rtx stream, but
            // that is unlikely to matter in practice.
            receive_rtp_config_.emplace(config.rtp.rtx_ssrc,
                ReceiveRtpConfig(config));
        }
        receive_rtp_config_.emplace(config.rtp.remote_ssrc,
            ReceiveRtpConfig(config));
        video_receive_streams_.insert(receive_stream);
        ConfigureSync(config.sync_group);
    }
    receive_stream->SignalNetworkState(video_network_state_);
    UpdateAggregateNetworkState();
    event_log_->Log(std::make_unique<RtcEventVideoReceiveStreamConfig>(
        CreateRtcLogStreamConfig(config)));
    return receive_stream;
}

4. VideoReceiveStream::Start is essentially where the jitter buffer machinery is started
./video/video_receive_stream.cc

void VideoReceiveStream::Start() 
    // jitter buffer creation, see step 4.1
    frame_buffer_->Start();
    // this starts the jitter buffer processing on the decode queue
    decode_queue_.PostTask([this] {
        RTC_DCHECK_RUN_ON(&decode_queue_);
        decoder_stopped_ = false;
        // see step 5 below: post a task that pulls the next frame from the jitter buffer
        StartNextDecode();
    });


    4.1 The jitter buffer is created in the VideoReceiveStream constructor
    ./video/video_receive_stream.cc

VideoReceiveStream::VideoReceiveStream(
        TaskQueueFactory* task_queue_factory,
        RtpStreamReceiverControllerInterface* receiver_controller,
        int num_cpu_cores,
        PacketRouter* packet_router,
        VideoReceiveStream::Config config,
        ProcessThread* process_thread,
        CallStats* call_stats,
        Clock* clock,
        VCMTiming* timing)
        // ./modules/video_coding/frame_buffer2.cc
        frame_buffer_.reset(new video_coding::FrameBuffer(clock_, timing_.get(), &stats_proxy_));

5. The StartNextDecode flow
./video/video_receive_stream.cc

void VideoReceiveStream::StartNextDecode() {
    TRACE_EVENT0("webrtc", "VideoReceiveStream::StartNextDecode");
    // see step 6
    frame_buffer_->NextFrame(
        GetWaitMs(), keyframe_required_, &decode_queue_,
        /* encoded frame handler */
        [this](std::unique_ptr<EncodedFrame> frame, ReturnReason res) {
            RTC_DCHECK_EQ(frame == nullptr, res == ReturnReason::kTimeout);
            RTC_DCHECK_EQ(frame != nullptr, res == ReturnReason::kFrameFound);
            decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
                RTC_DCHECK_RUN_ON(&decode_queue_);
                if (decoder_stopped_)
                    return;
                if (frame) {
                    // start decoding: see step 10 below, where the frame is handed to the decoder
                    HandleEncodedFrame(std::move(frame));
                } else {
                    HandleFrameBufferTimeout();
                }
                StartNextDecode();
            });
        }
    );
}

6. FrameBuffer::NextFrame (the jitter buffer)
./modules/video_coding/frame_buffer2.cc

void FrameBuffer::NextFrame(
    int64_t max_wait_time_ms,
    bool keyframe_required,
    rtc::TaskQueue* callback_queue,
    std::function<void(std::unique_ptr<EncodedFrame>, ReturnReason)> handler) {
    RTC_DCHECK_RUN_ON(callback_queue);
    TRACE_EVENT0("webrtc", "FrameBuffer::NextFrame");
    int64_t latest_return_time_ms =
        clock_->TimeInMilliseconds() + max_wait_time_ms;
    rtc::CritScope lock(&crit_);
    if (stopped_) {
        return;
    }
    latest_return_time_ms_ = latest_return_time_ms;
    keyframe_required_ = keyframe_required;
    // frame_handler_ is the lambda shown in step 5
    frame_handler_ = handler;
    callback_queue_ = callback_queue;
    // see step 7 below
    StartWaitForNextFrameOnQueue();
}

7.
./modules/video_coding/frame_buffer2.cc

void FrameBuffer::StartWaitForNextFrameOnQueue() {
    RTC_DCHECK(callback_queue_);
    RTC_DCHECK(!callback_task_.Running());
    // pulls a frame from the receive queue frames_ into the to-decode queue, see step 8
    int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds());
    callback_task_ = RepeatingTaskHandle::DelayedStart(
        callback_queue_->Get(), TimeDelta::ms(wait_ms), [this] {
            // If this task has not been cancelled, we did not get any new frames
            // while waiting. Continue with frame delivery.
            rtc::CritScope lock(&crit_);
            if (!frames_to_decode_.empty()) {
                // this first calls GetNextFrame (step 9), then the lambda above (step 10)
                // We have frames, deliver!
                frame_handler_(absl::WrapUnique(GetNextFrame()), kFrameFound);
                CancelCallback();
                return TimeDelta::Zero();  // Ignored.
            } else if (clock_->TimeInMilliseconds() >= latest_return_time_ms_) {
                // We have timed out, signal this and stop repeating.
                frame_handler_(nullptr, kTimeout);
                CancelCallback();
                return TimeDelta::Zero();  // Ignored.
            } else {
                // If there's no frames to decode and there is still time left, it
                // means that the frame buffer was cleared between creation and
                // execution of this task. Continue waiting for the remaining time.
                int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds());
                return TimeDelta::ms(wait_ms);
            }
        }
    );
}

    ./rtc_base/task_utils/repeating_task.h

template <class Closure>
    static RepeatingTaskHandle DelayedStart(TaskQueueBase* task_queue,
        TimeDelta first_delay,
        Closure&& closure) {
        auto repeating_task = std::make_unique<webrtc_repeating_task_impl::RepeatingTaskImpl<Closure>>(
            task_queue, first_delay, std::forward<Closure>(closure));
        auto* repeating_task_ptr = repeating_task.get();
        task_queue->PostDelayedTask(std::move(repeating_task), first_delay.ms());
        return RepeatingTaskHandle(repeating_task_ptr);
    }
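
    The contract worth noting in the code above: the closure handed to DelayedStart is re-run after every
    invocation, and the TimeDelta it returns becomes the delay before the next run; once the handle has been
    stopped (CancelCallback above), that return value is simply ignored. A toy, thread-based stand-in that
    mimics this contract (the real RepeatingTaskImpl re-posts itself on the task queue instead):

    #include <atomic>
    #include <chrono>
    #include <functional>
    #include <thread>

    using TimeDelta = std::chrono::milliseconds;

    class ToyRepeatingTask {
     public:
        // Runs `closure` after `first_delay`, then again after whatever delay the previous run returned,
        // until Stop() is called (either from outside or from inside the closure).
        void DelayedStart(TimeDelta first_delay, std::function<TimeDelta()> closure) {
            running_ = true;
            worker_ = std::thread([this, first_delay, closure] {
                TimeDelta delay = first_delay;
                while (running_) {
                    std::this_thread::sleep_for(delay);
                    if (!running_)
                        break;
                    delay = closure();  // return value = delay until the next run; ignored once stopped
                }
            });
        }
        void Stop() { running_ = false; }
        ~ToyRepeatingTask() {
            Stop();
            if (worker_.joinable())
                worker_.join();
        }
     private:
        std::atomic<bool> running_{false};
        std::thread worker_;
    };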

8.

int64_t FrameBuffer::FindNextFrame(int64_t now_ms) {
    // frames_ is the receive queue
    for (auto frame_it = frames_.begin();
        frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
        ++frame_it) {        
        EncodedFrame* frame = frame_it->second.frame.get();
        
        std::vector<FrameMap::iterator> current_superframe;
        current_superframe.push_back(frame_it);
        bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer;
        FrameMap::iterator next_frame_it = frame_it;
        while (true) {
            ++next_frame_it;
            if (next_frame_it == frames_.end() ||
                next_frame_it->first.picture_id != frame->id.picture_id ||
                !next_frame_it->second.continuous) {
                break;
            }

            // Check if the next frame has some undecoded references other than
            // the previous frame in the same superframe.
            size_t num_allowed_undecoded_refs =
                (next_frame_it->second.frame->inter_layer_predicted) ? 1 : 0;
            if (next_frame_it->second.num_missing_decodable >
                num_allowed_undecoded_refs) {
                break;
            }
            
            // All frames in the superframe should have the same timestamp.
            if (frame->Timestamp() != next_frame_it->second.frame->Timestamp()) {
                RTC_LOG(LS_WARNING) << "Frames in a single superframe have different"
                    " timestamps. Skipping undecodable superframe.";
                break;
            }
            
            // collect this frame into the superframe
            current_superframe.push_back(next_frame_it);
            last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer;
        }
        // move the collected superframe into the to-decode queue
        frames_to_decode_ = std::move(current_superframe);
    }
}


    // Where the data in frames_ comes from:
    // from https://blog.csdn.net/freeabc/article/details/106142951 we know the video receive path eventually calls

int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame)
        // add the frame to this queue
        auto info = frames_.emplace(id, FrameInfo()).first;
        // the received video frame
        info->second.frame = std::move(frame);
        // signal that a new continuous frame is available
        new_continuous_frame_event_.Set();
        // post a task so that StartWaitForNextFrameOnQueue runs again
        if (callback_queue_) {
            callback_queue_->PostTask([this] {
                rtc::CritScope lock(&crit_);
                if (!callback_task_.Running())
                    return;
                RTC_CHECK(frame_handler_);
                callback_task_.Stop();
                StartWaitForNextFrameOnQueue();
            });
        }
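
    Putting steps 5 through 8 together, the hand-off is a classic producer/consumer: the network side inserts
    assembled frames, and the decode queue pulls the next decodable one, waking up either on a timeout or when
    InsertFrame announces a new frame. A condensed stand-alone model of that hand-off (the real FrameBuffer uses
    RepeatingTaskHandle on a TaskQueue rather than a condition variable, and selects frames by decodability, not
    just by order):

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <mutex>

    struct EncodedFrame { int64_t picture_id = 0; /* bitstream ... */ };

    class ToyFrameBuffer {
     public:
        // Producer side: called from the RTP/network path (mirrors FrameBuffer::InsertFrame).
        void InsertFrame(std::unique_ptr<EncodedFrame> frame) {
            std::lock_guard<std::mutex> lock(mutex_);
            int64_t id = frame->picture_id;
            frames_[id] = std::move(frame);
            new_frame_.notify_one();  // wake a waiting NextFrame()
        }

        // Consumer side: called from the decode queue (mirrors FrameBuffer::NextFrame).
        std::unique_ptr<EncodedFrame> NextFrame(std::chrono::milliseconds max_wait) {
            std::unique_lock<std::mutex> lock(mutex_);
            new_frame_.wait_for(lock, max_wait, [this] { return !frames_.empty(); });
            if (frames_.empty())
                return nullptr;                // timeout -> HandleFrameBufferTimeout
            auto it = frames_.begin();         // lowest picture_id first
            std::unique_ptr<EncodedFrame> frame = std::move(it->second);
            frames_.erase(it);
            return frame;                      // handed to HandleEncodedFrame / the decoder
        }

     private:
        std::mutex mutex_;
        std::condition_variable new_frame_;
        std::map<int64_t, std::unique_ptr<EncodedFrame>> frames_;  // plays the role of frames_
    };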

9.

EncodedFrame* FrameBuffer::GetNextFrame() {
    std::vector<EncodedFrame*> frames_out;
    for (FrameMap::iterator& frame_it : frames_to_decode_) {
        EncodedFrame* frame = frame_it->second.frame.release();
        frames_out.push_back(frame);
    }
    
    UpdateJitterDelay();
    UpdateTimingFrameInfo();
    
    return CombineAndDeleteFrames(frames_out);
}

10.
./video/video_receive_stream.cc 

void VideoReceiveStream::HandleEncodedFrame(std::unique_ptr<EncodedFrame> frame) {
    int64_t now_ms = clock_->TimeInMilliseconds();
    
    // Current OnPreDecode only cares about QP for VP8.
    int qp = -1;
    if (frame->CodecSpecific()->codecType == kVideoCodecVP8) {
        if (!vp8::GetQp(frame->data(), frame->size(), &qp)) {
            RTC_LOG(LS_WARNING) << "Failed to extract QP from VP8 video frame";
        }
    }
    stats_proxy_.OnPreDecode(frame->CodecSpecific()->codecType, qp);
    HandleKeyFrameGeneration(frame->FrameType() == VideoFrameType::kVideoFrameKey,
        now_ms);

    // see step 10.1 for the analysis of this call
    int decode_result = video_receiver_.Decode(frame.get());
    if (decode_result == WEBRTC_VIDEO_CODEC_OK ||
        decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
        keyframe_required_ = false;
        frame_decoded_ = true;
        rtp_video_stream_receiver_.FrameDecoded(frame->id.picture_id);

        if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME)
            RequestKeyFrame(now_ms);
    } else if (!frame_decoded_ || !keyframe_required_ ||
        (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ < now_ms)) {
        keyframe_required_ = true;
        // TODO(philipel): Remove this keyframe request when downstream project
        //                 has been fixed.
        RequestKeyFrame(now_ms);
    }

    if (encoded_frame_buffer_function_) {
        frame->Retain();
        encoded_frame_buffer_function_(WebRtcRecordableEncodedFrame(*frame));
    }
}

    10.1 VideoReceiver2 video_receiver_;
    ./modules/video_coding/video_receiver2.cc

 int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) {
        RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
        TRACE_EVENT0("webrtc", "VideoReceiver2::Decode");
        // Change decoder if payload type has changed
        VCMGenericDecoder* decoder =
            codecDataBase_.GetDecoder(*frame, &decodedFrameCallback_);
        if (decoder == nullptr) {
            return VCM_NO_CODEC_REGISTERED;
        }
        // the decoder has been found; for the actual decode see step 10.2
        return decoder->Decode(*frame, clock_->TimeInMilliseconds());
    }


    Where codecDataBase_ comes from: looking at this function, its decoders are the video_decoders_ of VideoReceiveStream
    ./video/video_receive_stream.cc

void VideoReceiveStream::Start()    
        // normally all three codecs get registered here: VP8, VP9 and H.264 (AVC)
        for (const Decoder& decoder : config_.decoders) {
            // see the LegacyCreateVideoDecoder analysis below
            std::unique_ptr<VideoDecoder> video_decoder =
                decoder.decoder_factory->LegacyCreateVideoDecoder(decoder.video_format,
                    config_.stream_id);
            if (!decoded_output_file.empty()) {
                char filename_buffer[256];
                rtc::SimpleStringBuilder ssb(filename_buffer);
                ssb << decoded_output_file << "/webrtc_receive_stream_"
                    << this->config_.rtp.remote_ssrc << "-" << rtc::TimeMicros()
                    << ".ivf";
                    video_decoder = CreateFrameDumpingDecoderWrapper(std::move(video_decoder), FileWrapper::OpenWriteOnly(ssb.str()));
            }
            video_decoders_.push_back(std::move(video_decoder));
            video_receiver_.RegisterExternalDecoder(video_decoders_.back().get(),
                decoder.payload_type);
        }


        // now look at the RegisterExternalDecoder function
        ./modules/video_coding/decoder_database.h

void VCMDecoderDataBase::RegisterExternalDecoder(VideoDecoder* external_decoder, uint8_t payload_type) {
            // If payload value already exists, erase old and insert new.
            VCMExtDecoderMapItem* ext_decoder = new VCMExtDecoderMapItem(external_decoder, payload_type);
            DeregisterExternalDecoder(payload_type);
            dec_external_map_[payload_type] = ext_decoder;
        }
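
        The point of this registration shows up later in VideoReceiver2::Decode, which asks codecDataBase_ for a
        decoder by the payload type of the incoming frame. A minimal sketch of that payload-type keyed mapping
        (simplified types of my own; only dec_external_map_ above is the real member):

        #include <cstdint>
        #include <map>
        #include <memory>

        class VideoDecoder {
         public:
            virtual ~VideoDecoder() = default;
            // virtual int32_t Decode(...) = 0;
        };

        // One registered (external / hardware) decoder per RTP payload type; registering the same payload
        // type again replaces the old entry, and a lookup miss corresponds to VCM_NO_CODEC_REGISTERED
        // in VideoReceiver2::Decode.
        class ToyDecoderDatabase {
         public:
            void RegisterExternalDecoder(std::unique_ptr<VideoDecoder> decoder, uint8_t payload_type) {
                decoders_[payload_type] = std::move(decoder);
            }
            VideoDecoder* GetDecoder(uint8_t payload_type) {
                auto it = decoders_.find(payload_type);
                return it == decoders_.end() ? nullptr : it->second.get();
            }
         private:
            std::map<uint8_t, std::unique_ptr<VideoDecoder>> decoders_;
        };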


        // LegacyCreateVideoDecoder
        Next we follow how LegacyCreateVideoDecoder creates the decoder: first the decoder factory, then the CreateVideoDecoder function
        ./api/video_codecs/video_decoder_factory.cc

std::unique_ptr<VideoDecoder> VideoDecoderFactory::LegacyCreateVideoDecoder(
            const SdpVideoFormat& format,
            const std::string& receive_stream_id) {
            return CreateVideoDecoder(format);
        }


        //------------------------------------------------
        // Creating the decoder factory
        //------------------------------------------------
        ./sdk/android/src/jni/pc/peer_connection_factory.cc

ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
            JNIEnv* jni,
            const JavaParamRef<jobject>& jcontext,
            const JavaParamRef<jobject>& joptions,
            rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
            rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
            rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
            const JavaParamRef<jobject>& jencoder_factory,
            const JavaParamRef<jobject>& jdecoder_factory,
            rtc::scoped_refptr<AudioProcessing> audio_processor,
            std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
            std::unique_ptr<NetworkControllerFactoryInterface>
                network_controller_factory,
            std::unique_ptr<NetworkStatePredictorFactoryInterface>
                network_state_predictor_factory,
            std::unique_ptr<MediaTransportFactory> media_transport_factory,
                std::unique_ptr<NetEqFactory> neteq_factory)
            media_dependencies.video_decoder_factory =
                absl::WrapUnique(CreateVideoDecoderFactory(jni, jdecoder_factory));


        ./sdk/android/src/jni/pc/video.cc

VideoDecoderFactory* CreateVideoDecoderFactory(
            JNIEnv* jni,
            const JavaRef<jobject>& j_decoder_factory) {
            return IsNull(jni, j_decoder_factory)
                ? nullptr
                : new VideoDecoderFactoryWrapper(jni, j_decoder_factory);
        }


        // The factory is now created; it is a VideoDecoderFactoryWrapper object.
        // Its CreateVideoDecoder function looks like this:

        ./sdk/android/src/jni/video_decoder_factory_wrapper.cc

std::unique_ptr<VideoDecoder> VideoDecoderFactoryWrapper::CreateVideoDecoder(
        const SdpVideoFormat& format) {
            JNIEnv* jni = AttachCurrentThreadIfNeeded();
            ScopedJavaLocalRef<jobject> j_codec_info =
                SdpVideoFormatToVideoCodecInfo(jni, format);
            // see step 1 below
            ScopedJavaLocalRef<jobject> decoder = Java_VideoDecoderFactory_createDecoder(
                jni, decoder_factory_, j_codec_info);
            if (!decoder.obj())
                return nullptr;
            // the JNI-layer wrapper for the Java-layer video decoder, see step 2 below
            return JavaToNativeVideoDecoder(jni, decoder);
        }


        1.
        // this calls into the Java layer to create the (hardware) decoder

static base::android::ScopedJavaLocalRef<jobject> Java_VideoDecoderFactory_createDecoder(JNIEnv*
            env, const base::android::JavaRef<jobject>& obj, const base::android::JavaRef<jobject>& info) {
            jclass clazz = org_webrtc_VideoDecoderFactory_clazz(env);
            CHECK_CLAZZ(env, obj.obj(),
            org_webrtc_VideoDecoderFactory_clazz(env), NULL);
            jni_generator::JniJavaCallContextChecked call_context;
            call_context.Init<base::android::MethodID::TYPE_INSTANCE>(
                env,
                clazz,
                "createDecoder",
                "(Lorg/webrtc/VideoCodecInfo;)Lorg/webrtc/VideoDecoder;",
                &g_org_webrtc_VideoDecoderFactory_createDecoder);
                
            jobject ret =
                env->CallObjectMethod(obj.obj(), call_context.base.method_id, info.obj());
            return base::android::ScopedJavaLocalRef<jobject>(env, ret);
        }

        // Android Java

        public VideoDecoder DefaultVideoDecoderFactory::createDecoder(VideoCodecInfo codecType) {
            VideoDecoder softwareDecoder = softwareVideoDecoderFactory.createDecoder(codecType);
            final VideoDecoder hardwareDecoder = hardwareVideoDecoderFactory.createDecoder(codecType);
            if (softwareDecoder == null && platformSoftwareVideoDecoderFactory != null) {
                softwareDecoder = platformSoftwareVideoDecoderFactory.createDecoder(codecType);
            }
            if (hardwareDecoder != null && softwareDecoder != null) {
                // usually this is what gets returned to the native layer
                // Both hardware and software supported, wrap it in a software fallback
                return new VideoDecoderFallback(
                    /* fallback= */ softwareDecoder, /* primary= */ hardwareDecoder);
            }
            return hardwareDecoder != null ? hardwareDecoder : softwareDecoder;
        }

        // Android Java --- MediaCodecVideoDecoderFactory, the base class of HardwareVideoDecoderFactory

        public VideoDecoder MediaCodecVideoDecoderFactory::createDecoder(VideoCodecInfo codecType) {
            VideoCodecType type = VideoCodecType.valueOf(codecType.getName());
            MediaCodecInfo info = findCodecForType(type);

            if (info == null) {
              return null;
            }

            CodecCapabilities capabilities = info.getCapabilitiesForType(type.mimeType());
            return new AndroidVideoDecoder(new MediaCodecWrapperFactoryImpl(), info.getName(), type,
                MediaCodecUtils.selectColorFormat(MediaCodecUtils.DECODER_COLOR_FORMATS, capabilities),
                sharedContext);
        }


        2.
        ./sdk/android/src/jni/video_decoder_wrapper.cc

std::unique_ptr<VideoDecoder> JavaToNativeVideoDecoder(
            JNIEnv* jni,
            const JavaRef<jobject>& j_decoder) {
            // ask the Java decoder to create a native decoder
            const jlong native_decoder =
                Java_VideoDecoder_createNativeVideoDecoder(jni, j_decoder);
            VideoDecoder* decoder;
            if (native_decoder == 0) {
                decoder = new VideoDecoderWrapper(jni, j_decoder);
            } else {
                decoder = reinterpret_cast<VideoDecoder*>(native_decoder);
            }
            return std::unique_ptr<VideoDecoder>(decoder);
        }

        Java_VideoDecoder_createNativeVideoDecoder calls into the Java layer:

public long VideoDecoderFallback::createNativeVideoDecoder() {
            return nativeCreateDecoder(fallback, primary);
        }

 

JNI_GENERATOR_EXPORT jlong Java_org_webrtc_VideoDecoderFallback_nativeCreateDecoder(
            JNIEnv* env,
            jclass jcaller,
            jobject fallback,
            jobject primary) {
            return JNI_VideoDecoderFallback_CreateDecoder(env, base::android::JavaParamRef<jobject>(env,
                fallback), base::android::JavaParamRef<jobject>(env, primary));
        }

        ./sdk/android/src/jni/video_decoder_fallback.cc

 static jlong JNI_VideoDecoderFallback_CreateDecoder(
            JNIEnv* jni,
            const JavaParamRef<jobject>& j_fallback_decoder,
            const JavaParamRef<jobject>& j_primary_decoder)
            VideoDecoder* nativeWrapper =
                CreateVideoDecoderSoftwareFallbackWrapper(std::move(fallback_decoder), std::move(primary_decoder))

        ./api/video_codecs/video_decoder_software_fallback_wrapper.cc

std::unique_ptr<VideoDecoder> CreateVideoDecoderSoftwareFallbackWrapper(
            std::unique_ptr<VideoDecoder> sw_fallback_decoder,
            std::unique_ptr<VideoDecoder> hw_decoder) {
          return std::make_unique<VideoDecoderSoftwareFallbackWrapper>(
              std::move(sw_fallback_decoder), std::move(hw_decoder));
        }

        // This VideoDecoderSoftwareFallbackWrapper is exactly the decoder_ held by VCMGenericDecoder
            
    10.2
    ./modules/video_coding/generic_decoder.cc

    int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
        TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp", frame.Timestamp());

        _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
        _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
        _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
        _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
        _frameInfos[_nextFrameInfoIdx].ntp_time_ms = frame.EncodedImage().ntp_time_ms_;
        _frameInfos[_nextFrameInfoIdx].packet_infos = frame.PacketInfos();

        // Set correctly only for key frames. Thus, use latest key frame
        // content type. If the corresponding key frame was lost, decode will fail
        // and content type will be ignored.
        if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
            _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
            _last_keyframe_content_type = frame.contentType();
        } else {
            _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
        }
        _callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);

        _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;    
        // from the analysis in step 10.1 we know decoder_ is the VideoDecoderSoftwareFallbackWrapper
        int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
            frame.RenderTimeMs());

        _callback->OnDecoderImplementationName(decoder_->ImplementationName());
        if (ret < WEBRTC_VIDEO_CODEC_OK) {
            RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
                << frame.Timestamp() << ", error code: " << ret;
            _callback->Pop(frame.Timestamp());
            return ret;
        } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT) {
            // No output
            _callback->Pop(frame.Timestamp());
        }
        return ret;
    }    

    ./api/video_codecs/video_decoder_software_fallback_wrapper.cc

    int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
        const EncodedImage& input_image,
        bool missing_frames,
        int64_t render_time_ms) {        
        switch (decoder_type_) {
        case DecoderType::kNone:
            return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
        case DecoderType::kHardware: {
            int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
            ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);
            if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
                return ret;
            }

            // HW decoder returned WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE or
            // initialization failed, fallback to software.
            if (!InitFallbackDecoder()) {
                return ret;
            }

            // Fallback decoder initialized, fall-through.
            RTC_FALLTHROUGH();
        }
        case DecoderType::kFallback:
            return fallback_decoder_->Decode(input_image, missing_frames, render_time_ms);
        default:
            RTC_NOTREACHED();
            return WEBRTC_VIDEO_CODEC_ERROR;
        }
    }

    // The org.webrtc hardware-decode entry point at the Java layer

    public VideoCodecStatus AndroidVideoDecoder::decode(EncodedImage frame, DecodeInfo info)
    
        int index;
        try {
            index = codec.dequeueInputBuffer(DEQUEUE_INPUT_TIMEOUT_US);
        } catch (IllegalStateException e) {
            Logging.e(TAG, "dequeueInputBuffer failed", e);
            return VideoCodecStatus.ERROR;
        }
        if (index < 0) {
            // Decoder is falling behind.  No input buffers available.
            // The decoder can't simply drop frames; it might lose a key frame.
            Logging.e(TAG, "decode() - no HW buffers available; decoder falling behind");
            return VideoCodecStatus.ERROR;
        }

        ByteBuffer buffer;
        try {
            buffer = codec.getInputBuffers()[index];
        } catch (IllegalStateException e) {
            Logging.e(TAG, "getInputBuffers failed", e);
            return VideoCodecStatus.ERROR;
        }

        if (buffer.capacity() < size) {
            Logging.e(TAG, "decode() - HW buffer too small");
            return VideoCodecStatus.ERROR;
        }
        buffer.put(frame.buffer);

        frameInfos.offer(new FrameInfo(SystemClock.elapsedRealtime(), frame.rotation));
        try {
            codec.queueInputBuffer(index, 0 /* offset */, size,
                TimeUnit.NANOSECONDS.toMicros(frame.captureTimeNs), 0 /* flags */);
        } catch (IllegalStateException e) {
            Logging.e(TAG, "queueInputBuffer failed", e);
            frameInfos.pollLast();
            return VideoCodecStatus.ERROR;
        }
        if (keyFrameRequired) {
            keyFrameRequired = false;
        }
        return VideoCodecStatus.OK;


    //-----------------------------------------------------------------------
    // This completes the analysis of how data flows from the jitter buffer into the decoder.
    //-----------------------------------------------------------------------
