----------------------------------------------------------------------------------------------------------------------------------------
一分鐘快速搭建 rtmpd 服務器: https://blog.csdn.net/freeabc/article/details/102880984
軟件下載地址: http://www.qiyicc.com/download/rtmpd.rar
github 地址:https://github.com/superconvert/smart_rtmpd
-----------------------------------------------------------------------------------------------------------------------------------------
webrtc 視頻流接收流程分析從 socket 接收數據一直到放入 jitterbuffer 內整個處理流程與環節
部分流程依賴於 webrtc 中有關 socket 運行機制以及 stun 收發過程 及 Candidates 生成流程分析
//******************************************************************************
//
// 下面這個流程分析的是從 socket 接收數據,並最終放入 jitterbuffer 中,以視頻
// 數據爲主,這樣大家需要改善接收 pipeline 中的那些環節的參數,提供一個參考
//
//******************************************************************************
1. socket 接收流程不再分析,參見上面的分析,這個流程建立在雙方 candidate 交換之後, stun 之後,
就開始交換 candidate,選取最合適的 candidate 然後建立鏈接,就可以互發互收音視頻數據了。
UDPPort::OnReadPacket --->
if (Connection* conn = GetConnection(remote_addr)) {
conn->OnReadPacket(data, size, packet_time_us);
} else {
1.1 下面我講講這個 Connection 的由來
./p2p/base/port.cc
Connection* Port::GetConnection(const rtc::SocketAddress& remote_addr) {
AddressMap::const_iterator iter = connections_.find(remote_addr);
if (iter != connections_.end())
return iter->second;
else
return NULL;
}
其實就是 connections_ 存儲的一個對象,下面我們分析一下 connections_ 是怎麼來的
// Java 層會調用過來
./sdk/android/src/jni/pc/peer_connection.cc
static jboolean JNI_PeerConnection_AddIceCandidate(
JNIEnv* jni,
const JavaParamRef<jobject>& j_pc,
const JavaParamRef<jstring>& j_sdp_mid,
jint j_sdp_mline_index,
const JavaParamRef<jstring>& j_candidate_sdp)
return ExtractNativePC(jni, j_pc)->AddIceCandidate(candidate.get());
./pc/peer_connection.cc
bool PeerConnection::AddIceCandidate(
const IceCandidateInterface* ice_candidate)
./pc/peer_connection.cc
bool PeerConnection::UseCandidate(const IceCandidateInterface* candidate)
RTCError error = transport_controller_->AddRemoteCandidates(result.value()->name, candidates);
./pc/jsep_transport_controller.cc
RTCError JsepTransportController::AddRemoteCandidates(
const std::string& transport_name,
const cricket::Candidates& candidates)
return jsep_transport->AddRemoteCandidates(candidates);
./pc/jsep_transport.cc
webrtc::RTCError JsepTransport::AddRemoteCandidates(
const Candidates& candidates)
transport->internal()->ice_transport()->AddRemoteCandidate(candidate);
./p2p/base/p2p_transport_channel.cc
void P2PTransportChannel::AddRemoteCandidate(const Candidate& candidate)
FinishAddingRemoteCandidate(new_remote_candidate);
./p2p/base/p2p_transport_channel.cc
void P2PTransportChannel::FinishAddingRemoteCandidate(
const Candidate& new_remote_candidate)
CreateConnections(new_remote_candidate, NULL);
./p2p/base/p2p_transport_channel.cc
bool P2PTransportChannel::CreateConnections(const Candidate& remote_candidate,
PortInterface* origin_port)
std::vector<PortInterface*>::reverse_iterator it;
for (it = ports_.rbegin(); it != ports_.rend(); ++it) {
if (CreateConnection(*it, remote_candidate, origin_port)) {
if (*it == origin_port)
created = true;
}
}
bool P2PTransportChannel::CreateConnection(PortInterface* port,
const Candidate& remote_candidate,
PortInterface* origin_port)
Connection* connection = port->CreateConnection(remote_candidate, origin);
AddConnection(connection);
// AddConnection 裏掛接
void P2PTransportChannel::AddConnection(Connection* connection)
connection->SignalReadPacket.connect(this, &P2PTransportChannel::OnReadPacket);
./p2p/base/turn_port.cc
Connection* TurnPort::CreateConnection(const Candidate& remote_candidate, CandidateOrigin origin)
ProxyConnection* conn = new ProxyConnection(this, index, remote_candidate);
AddOrReplaceConnection(conn);
./p2p/base/port.cc
void Port::AddOrReplaceConnection(Connection* conn)
auto ret = connections_.insert(std::make_pair(conn->remote_candidate().address(), conn));
所以正式建立連接後,會進入下面的函數其實是 ProxyConnection 對象,ProxyConnection 派生於 Connection
2.
Connection::OnReadPacket --->
SignalReadPacket(this, data, size, packet_time_us);
上述流程 1.1 中 AddConnection 裏的 connect 調用,已經把這個 SignalReadPacket 掛接到了 P2PTransportChannel::OnReadPacket
3.
P2PTransportChannel::OnReadPacket() --->
SignalReadPacket(this, data, len, packet_time_us, 0);
3.1 爲什麼 P2PTransportChannel::OnReadPacket 調用 DtlsTransport::OnReadPacket
./p2p/base/dtls_transport.cc
// ice_transport_ 就是 P2PTransportChannel,具體參考下面的代碼,
ice_transport_->SignalReadPacket.connect(this, &DtlsTransport::OnReadPacket);
4.
DtlsTransport::OnReadPacket() --->
case DTLS_TRANSPORT_CONNECTING:
case DTLS_TRANSPORT_CONNECTED:
if (IsDtlsPacket(data, size)) {
// 這個地方表明 ssl 握手已經成功, 正式處理數據了
if (!HandleDtlsPacket(data, size)) {
RTC_LOG(LS_ERROR) << ToString() << ": Failed to handle DTLS packet.";
return;
}
}
5.
bool DtlsTransport::HandleDtlsPacket(const char* data, size_t size)
return downward_->OnPacketReceived(data, size);
5.1 我們看 downward_ 其實就是 StreamInterfaceChannel
bool DtlsTransport::SetupDtls() 裏分配的 downward_
StreamInterfaceChannel* downward = new StreamInterfaceChannel(ice_transport_);
dtls_.reset(rtc::SSLStreamAdapter::Create(downward));
if (!dtls_) {
RTC_LOG(LS_ERROR) << ToString() << ": Failed to create DTLS adapter.";
delete downward;
return false;
}
downward_ = downward;
dtls_->SetIdentity(local_certificate_->identity()->GetReference());
dtls_->SetMode(rtc::SSL_MODE_DTLS);
dtls_->SetMaxProtocolVersion(ssl_max_version_);
dtls_->SetServerRole(*dtls_role_);
//------------------------------------------------------------
// 這句綁定了下面的流程 7 調用流程 8
//------------------------------------------------------------
dtls_->SignalEvent.connect(this, &DtlsTransport::OnDtlsEvent);
dtls_->SignalSSLHandshakeError.connect(this, &DtlsTransport::OnDtlsHandshakeError);
5.2 rtc::SSLStreamAdapter::Create(downward) 裏面綁定了 SignalEvent 到 StreamAdapterInterface::OnEvent
./rtc_base/ssl_stream_adapter.cc
SSLStreamAdapter* SSLStreamAdapter::Create(StreamInterface* stream) {
return new OpenSSLStreamAdapter(stream);
}
./rtc_base/ssl_stream_adapter.cc
SSLStreamAdapter::SSLStreamAdapter(StreamInterface* stream)
: StreamAdapterInterface(stream) {}
./rtc_base/stream.cc
StreamAdapterInterface::StreamAdapterInterface(StreamInterface* stream, bool owned) : stream_(stream), owned_(owned) {
if (nullptr != stream_)
//-----------------------------------------------
// 這個地方掛接流程 6 調用 7 的原因
//-----------------------------------------------
stream_->SignalEvent.connect(this, &StreamAdapterInterface::OnEvent);
}
6.
./p2p/base/dtls_transport.cc
bool StreamInterfaceChannel::OnPacketReceived(const char* data, size_t size) {
// We force a read event here to ensure that we don't overflow our queue.
bool ret = packets_.WriteBack(data, size, NULL);
RTC_CHECK(ret) << "Failed to write packet to queue.";
if (ret) {
SignalEvent(this, rtc::SE_READ, 0);
}
return ret;
}
7.
./rtc_base/stream.cc
void StreamAdapterInterface::OnEvent(StreamInterface* stream, int events, int err)
SignalEvent(this, events, err);
//-----------------------------------------------
// 參見上面的 DtlsTransport::SetupDtls
//-----------------------------------------------
8.
./p2p/base/dtls_transport.cc
void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err)
SignalReadPacket(this, buf, read, rtc::TimeMicros(), 0);
下面我們分析一下 RtpTransport 怎麼掛載了 DtlsTransport 的 SignalReadPacket
// 上層的 java 調用這個
./sdk/android/src/jni/pc/peer_connection.cc
static void JNI_PeerConnection_SetRemoteDescription(
JNIEnv* jni,
const JavaParamRef<jobject>& j_pc,
const JavaParamRef<jobject>& j_observer,
const JavaParamRef<jobject>& j_sdp)
ExtractNativePC(jni, j_pc)->SetRemoteDescription(observer, JavaToNativeSessionDescription(jni, j_sdp).release());
./pc/peer_connection.cc
void PeerConnection::SetRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
rtc::scoped_refptr<SetRemoteDescriptionObserverInterface> observer)
this_weak_ptr->DoSetRemoteDescription(std::move(desc), std::move(observer));
./pc/peer_connection.cc
void PeerConnection::DoSetRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
rtc::scoped_refptr<SetRemoteDescriptionObserverInterface> observer)
error = ApplyRemoteDescription(std::move(desc));
./pc/peer_connection.cc
RTCError PeerConnection::ApplyRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc)
RTCError error = PushdownTransportDescription(cricket::CS_REMOTE, type);
./pc/peer_connection.cc
RTCError PeerConnection::PushdownTransportDescription(
cricket::ContentSource source,
SdpType type)
return transport_controller_->SetRemoteDescription(type, sdesc->description());
./pc/jsep_transport_controller.cc
RTCError JsepTransportController::SetRemoteDescription(
SdpType type,
const cricket::SessionDescription* description)
return ApplyDescription_n(/*local=*/false, type, description);
./pc/jsep_transport_controller.cc
RTCError JsepTransportController::ApplyDescription_n(
bool local,
SdpType type,
const cricket::SessionDescription* description)
error = MaybeCreateJsepTransport(local, content_info, *description);
./pc/jsep_transport_controller.cc
RTCError JsepTransportController::MaybeCreateJsepTransport(
bool local,
const cricket::ContentInfo& content_info,
const cricket::SessionDescription& description)
//--------------------------------------------------------------------------
// 看到這個 rtp_dtls_transport 了嗎?這個就是我們的 DtlsTransport,在這個地方創建的
//--------------------------------------------------------------------------
std::unique_ptr<cricket::DtlsTransportInternal> rtp_dtls_transport =
CreateDtlsTransport(content_info, ice->internal(), nullptr);
dtls_srtp_transport = CreateDtlsSrtpTransport(
content_info.name, rtp_dtls_transport.get(), rtcp_dtls_transport.get());
./pc/jsep_transport_controller.cc
std::unique_ptr<webrtc::DtlsSrtpTransport>
JsepTransportController::CreateDtlsSrtpTransport(
const std::string& transport_name,
cricket::DtlsTransportInternal* rtp_dtls_transport,
cricket::DtlsTransportInternal* rtcp_dtls_transport) {
RTC_DCHECK(network_thread_->IsCurrent());
auto dtls_srtp_transport = std::make_unique<webrtc::DtlsSrtpTransport>(rtcp_dtls_transport == nullptr);
if (config_.enable_external_auth) {
dtls_srtp_transport->EnableExternalAuth();
}
dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport, rtcp_dtls_transport);
dtls_srtp_transport->SetActiveResetSrtpParams(config_.active_reset_srtp_params);
dtls_srtp_transport->SignalDtlsStateChange.connect(this, &JsepTransportController::UpdateAggregateStates_n);
return dtls_srtp_transport;
}
void DtlsSrtpTransport::SetDtlsTransports(
cricket::DtlsTransportInternal* rtp_dtls_transport,
cricket::DtlsTransportInternal* rtcp_dtls_transport) {
// Transport names should be the same.
if (rtp_dtls_transport && rtcp_dtls_transport)
SetRtpPacketTransport(rtp_dtls_transport);
./pc/rtp_transport.cc
void RtpTransport::SetRtpPacketTransport(
rtc::PacketTransportInternal* new_packet_transport)
//---------------------------------------------------------------------
// 這句把 DtlsTransport 與 RtpTransport::OnReadPacket 調用掛接,所以流程 8 會調用到 流程 9
//---------------------------------------------------------------------
new_packet_transport->SignalReadPacket.connect(this, &RtpTransport::OnReadPacket);
9.
./pc/rtp_transport.cc
void RtpTransport::OnReadPacket(rtc::PacketTransportInternal* transport,
const char* data,
size_t len,
const int64_t& packet_time_us,
int flags)
OnRtpPacketReceived(std::move(packet), packet_time_us);
void RtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us) {
DemuxPacket(packet, packet_time_us);
}
void RtpTransport::DemuxPacket(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us) {
webrtc::RtpPacketReceived parsed_packet(&header_extension_map_);
if (!parsed_packet.Parse(std::move(packet))) {
RTC_LOG(LS_ERROR) << "Failed to parse the incoming RTP packet before demuxing. Drop it.";
return;
}
if (packet_time_us != -1) {
parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
}
if (!rtp_demuxer_.OnRtpPacket(parsed_packet)) {
RTC_LOG(LS_WARNING) << "Failed to demux RTP packet: " << RtpDemuxer::DescribePacket(parsed_packet);
}
}
10.
bool RtpDemuxer::OnRtpPacket(const RtpPacketReceived& packet) {
RtpPacketSinkInterface* sink = ResolveSink(packet);
if (sink != nullptr) {
sink->OnRtpPacket(packet);
return true;
}
return false;
}
下面我們分析一下這個 Sink 到底是哪個對象
10.1
首先從這個的調用流程參考上面的分析,誰調用的這個函數,上面已經分析過了
./pc/peer_connection.cc
RTCError PeerConnection::ApplyRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc)
error = UpdateSessionState(type, cricket::CS_REMOTE, remote_description()->description());
10.2
./pc/peer_connection.cc
RTCError PeerConnection::UpdateSessionState(
SdpType type,
cricket::ContentSource source,
const cricket::SessionDescription* description)
RTCError error = PushdownMediaDescription(type, source);
10.3
./pc/peer_connection.cc
RTCError PeerConnection::PushdownMediaDescription(
SdpType type,
cricket::ContentSource source) {
const SessionDescriptionInterface* sdesc =
(source == cricket::CS_LOCAL ? local_description()
: remote_description());
RTC_DCHECK(sdesc);
// Push down the new SDP media section for each audio/video transceiver.
for (const auto& transceiver : transceivers_) {
const ContentInfo* content_info =
FindMediaSectionForTransceiver(transceiver, sdesc);
// transceiver 的分析參見 10.3.1
// transceiver 的 channel 分析參見 10.3.2
cricket::ChannelInterface* channel = transceiver->internal()->channel();
if (!channel || !content_info || content_info->rejected) {
continue;
}
const MediaContentDescription* content_desc =
content_info->media_description();
if (!content_desc) {
continue;
}
std::string error;
bool success = (source == cricket::CS_LOCAL)
? channel->SetLocalContent(content_desc, type, &error)
// 參見流程 10.3.3
: channel->SetRemoteContent(content_desc, type, &error);
if (!success) {
LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, error);
}
}
10.3.1 transceiver 對象的創建
上面的 transceivers_ 我們分析一下,在 PeerConnection::Initialize 創建的
./pc/peer_connection.cc
bool PeerConnection::Initialize(const PeerConnectionInterface::RTCConfiguration& configuration,
PeerConnectionDependencies dependencies)
if (!IsUnifiedPlan()) {
transceivers_.push_back(
RtpTransceiverProxyWithInternal<RtpTransceiver>::Create(
signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_AUDIO)));
transceivers_.push_back(
RtpTransceiverProxyWithInternal<RtpTransceiver>::Create(
signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_VIDEO)));
}
我們看到這是一個 RtpTransceiver 對象,它的 channel 就是 VideoChannel 參考下面的分析
10.3.2 transceiver 的 channel 由來
10.3.2.1
RTCError PeerConnection::ApplyRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc)
RTCError error = CreateChannels(*remote_description()->description());
10.3.2.2
RTCError PeerConnection::CreateChannels(const SessionDescription& desc)
// 參見流程 10.3.2.2.1
cricket::VideoChannel* video_channel = CreateVideoChannel(video->name);
if (!video_channel) {
LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR,
"Failed to create video channel.");
}
// 這個地方掛接了 RtpTransceiver 的 channel 就是 VideoChannel
GetVideoTransceiver()->internal()->SetChannel(video_channel);
10.3.2.2.1 下面這個創建了 channel
cricket::VideoChannel* PeerConnection::CreateVideoChannel(
const std::string& mid) {
RtpTransportInternal* rtp_transport = GetRtpTransport(mid);
MediaTransportConfig media_transport_config =
transport_controller_->GetMediaTransportConfig(mid);
cricket::VideoChannel* video_channel = channel_manager()->CreateVideoChannel(
call_ptr_, configuration_.media_config, rtp_transport,
media_transport_config, signaling_thread(), mid, SrtpRequired(),
GetCryptoOptions(), &ssrc_generator_, video_options_,
video_bitrate_allocator_factory_.get());
if (!video_channel) {
return nullptr;
}
video_channel->SignalDtlsSrtpSetupFailure.connect(
this, &PeerConnection::OnDtlsSrtpSetupFailure);
video_channel->SignalSentPacket.connect(this,
&PeerConnection::OnSentPacket_w);
video_channel->SetRtpTransport(rtp_transport);
return video_channel;
}
//--------------------------- 分析 channel_manager ---------------------------------
// channel_manager 函數的定義,factory_ 是 PeerConnection 構造函數傳遞過來的,我們繼續分析構造函數
cricket::ChannelManager* PeerConnection::channel_manager() const {
return factory_->channel_manager();
}
PeerConnection 的構造函數,我們看到 factory_ 其實就是傳入的 PeerConnectionFactory
PeerConnection::PeerConnection(PeerConnectionFactory* factory,
std::unique_ptr<RtcEventLog> event_log,
std::unique_ptr<Call> call) : factory_(factory),
有關 PeerConnection 的創建,我們看到就是傳遞的 this(PeerConnectionFactory*) 到 PeerConnection
rtc::scoped_refptr<PeerConnectionInterface>
PeerConnectionFactory::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
PeerConnectionDependencies dependencies)
rtc::scoped_refptr<PeerConnection> pc(new rtc::RefCountedObject<PeerConnection>(this, std::move(event_log),
std::move(call)));
因此上述的 factory_->channel_manager(),其實就是 PeerConnectionFactory::channel_manager
cricket::ChannelManager* PeerConnectionFactory::channel_manager() {
return channel_manager_.get();
}
在函數內部創建了 channel_manager_
bool PeerConnectionFactory::Initialize()
channel_manager_ = std::make_unique<cricket::ChannelManager>(
std::move(media_engine_), std::make_unique<cricket::RtpDataEngine>(),
worker_thread_, network_thread_);
// 到此我們知道 channel_manager_ 就是 cricket::ChannelManager 對象
// 下面分析 media_engine_,因爲下面流程用到了
PeerConnectionFactory 裏的 media_engine_ 來自構造函數
PeerConnectionFactory::PeerConnectionFactory(
PeerConnectionFactoryDependencies dependencies)
: wraps_current_thread_(false),
network_thread_(dependencies.network_thread),
worker_thread_(dependencies.worker_thread),
signaling_thread_(dependencies.signaling_thread),
task_queue_factory_(std::move(dependencies.task_queue_factory)),
media_engine_(std::move(dependencies.media_engine)),
PeerConnectionFactory 的創建在下面函數
./sdk/android/src/jni/pc/peer_connection_factory.cc
ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
JNIEnv* jni,
const JavaParamRef<jobject>& jcontext,
const JavaParamRef<jobject>& joptions,
rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
const JavaParamRef<jobject>& jencoder_factory,
const JavaParamRef<jobject>& jdecoder_factory,
rtc::scoped_refptr<AudioProcessing> audio_processor,
std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
std::unique_ptr<NetworkControllerFactoryInterface>
network_controller_factory,
std::unique_ptr<NetworkStatePredictorFactoryInterface>
network_state_predictor_factory,
std::unique_ptr<MediaTransportFactory> media_transport_factory,
std::unique_ptr<NetEqFactory> neteq_factory)
// 這個就是我們需要分析的 media_engine_
dependencies.media_engine = cricket::CreateMediaEngine(std::move(media_dependencies));
// 下面這句在 ./pc/peer_connection_factory.cc 裏就是創建 PeerConnectionFactory 對象
rtc::scoped_refptr<PeerConnectionFactoryInterface> factory =
CreateModularPeerConnectionFactory(std::move(dependencies));
函數 CreateMediaEngine 在
./media/engine/webrtc_media_engine.cc
std::unique_ptr<MediaEngineInterface> CreateMediaEngine(
MediaEngineDependencies dependencies) {
auto audio_engine = std::make_unique<WebRtcVoiceEngine>(
dependencies.task_queue_factory, std::move(dependencies.adm),
std::move(dependencies.audio_encoder_factory),
std::move(dependencies.audio_decoder_factory),
std::move(dependencies.audio_mixer),
std::move(dependencies.audio_processing));
#ifdef HAVE_WEBRTC_VIDEO
auto video_engine = std::make_unique<WebRtcVideoEngine>(
std::move(dependencies.video_encoder_factory),
std::move(dependencies.video_decoder_factory));
#else
auto video_engine = std::make_unique<NullWebRtcVideoEngine>();
#endif
return std::make_unique<CompositeMediaEngine>(std::move(audio_engine),
std::move(video_engine));
}
從這裏我們看到 media_engine_ 就是 CompositeMediaEngine , 裏面包含 WebRtcVoiceEngine 和 WebRtcVideoEngine 兩個引擎
所以下面的函數調用其實就是 WebRtcVideoEngine::CreateMediaChannel
// ----- GetVideoTransceiver()->internal()->SetChannel(video_channel) -----
// 表明 RtpTransceiver 的 channel 就是 VideoChannel 了
VideoChannel* ChannelManager::CreateVideoChannel(
webrtc::Call* call,
const cricket::MediaConfig& media_config,
webrtc::RtpTransportInternal* rtp_transport,
const webrtc::MediaTransportConfig& media_transport_config,
rtc::Thread* signaling_thread,
const std::string& content_name,
bool srtp_required,
const webrtc::CryptoOptions& crypto_options,
rtc::UniqueRandomIdGenerator* ssrc_generator,
const VideoOptions& options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
VideoMediaChannel* media_channel = media_engine_->video().CreateMediaChannel(
call, media_config, options, crypto_options,
video_bitrate_allocator_factory);
if (!media_channel) {
return nullptr;
}
auto video_channel = std::make_unique<VideoChannel>(
worker_thread_, network_thread_, signaling_thread,
absl::WrapUnique(media_channel), content_name, srtp_required,
crypto_options, ssrc_generator);
video_channel->Init_w(rtp_transport, media_transport_config);
VideoChannel* video_channel_ptr = video_channel.get();
video_channels_.push_back(std::move(video_channel));
return video_channel_ptr;
我們分析 media_engine_->video().CreateMediaChannel 參見下面的函數,media_channel 就是一個 WebRtcVideoChannel 對象
./media/engine/webrtc_video_engine.cc
VideoMediaChannel* WebRtcVideoEngine::CreateMediaChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
RTC_LOG(LS_INFO) << "CreateMediaChannel. Options: " << options.ToString();
return new WebRtcVideoChannel(call, config, options, crypto_options,
encoder_factory_.get(), decoder_factory_.get(),
video_bitrate_allocator_factory);
}
//-------------------------------------------------------------------------------
10.3.3
我們繼續分析 SetRemoteContent
./pc/channel.cc
bool BaseChannel::SetRemoteContent(const MediaContentDescription* content,
SdpType type,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "BaseChannel::SetRemoteContent");
return InvokeOnWorker<bool>(
RTC_FROM_HERE,
Bind(&BaseChannel::SetRemoteContent_w, this, content, type, error_desc));
}
./pc/channel.cc
bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
SdpType type,
std::string* error_desc)
if (!RegisterRtpDemuxerSink()) {
RTC_LOG(LS_ERROR) << "Failed to update video demuxing.";
return false;
}
./pc/channel.cc
bool BaseChannel::RegisterRtpDemuxerSink() {
RTC_DCHECK(rtp_transport_);
return network_thread_->Invoke<bool>(RTC_FROM_HERE, [this] {
return rtp_transport_->RegisterRtpDemuxerSink(demuxer_criteria_, this);
});
}
./pc/rtp_transport.cc
bool RtpTransport::RegisterRtpDemuxerSink(const RtpDemuxerCriteria& criteria,
RtpPacketSinkInterface* sink) {
rtp_demuxer_.RemoveSink(sink);
if (!rtp_demuxer_.AddSink(criteria, sink)) {
RTC_LOG(LS_ERROR) << "Failed to register the sink for RTP demuxer.";
return false;
}
return true;
}
我們看到 VideoChannel 作爲 Sink 加到 RtpTransport 裏的 rtp_demuxer_, 所以 RtpDemuxer::OnRtpPacket
會調用 VideoChannel::OnRtpPacket
11. VideoChannel的基類實現
./pc/channel.cc
void BaseChannel::OnRtpPacket(const webrtc::RtpPacketReceived& parsed_packet)
invoker_.AsyncInvoke<void>(
RTC_FROM_HERE, worker_thread_, [this, packet_buffer, packet_time_us] {
RTC_DCHECK(worker_thread_->IsCurrent());
// 其實這個就是 WebRtcVideoChannel
media_channel_->OnPacketReceived(packet_buffer, packet_time_us);
});
系統初始化過程中會調用下面的函數創建 media_channel_(即 WebRtcVideoChannel):
VideoMediaChannel* WebRtcVideoEngine::CreateMediaChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
RTC_LOG(LS_INFO) << "CreateMediaChannel. Options: " << options.ToString();
return new WebRtcVideoChannel(call, config, options, crypto_options,
encoder_factory_.get(), decoder_factory_.get(),
video_bitrate_allocator_factory);
}
12.
./media/engine/webrtc_video_engine.cc
void WebRtcVideoChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us)
if (call_->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, packet, packet_time_us) !=
webrtc::PacketReceiver::DELIVERY_OK) {
RTC_LOG(LS_WARNING) << "Failed to deliver RTP packet on re-delivery.";
return;
}
// 其實 call_->Receiver() 就是 Call 對象自身。
./call/call.cc
PacketReceiver* Call::Receiver() {
RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);
return this;
}
13.
./call/call.cc
PacketReceiver::DeliveryStatus Call::DeliverPacket(
MediaType media_type,
rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us) {
RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);
if (IsRtcp(packet.cdata(), packet.size()))
return DeliverRtcp(media_type, packet.cdata(), packet.size());
return DeliverRtp(media_type, std::move(packet), packet_time_us);
}
./call/call.cc
PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type,
rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us)
if (video_receiver_controller_.OnRtpPacket(parsed_packet)) {
14.
./call/rtp_stream_receiver_controller.cc
bool RtpStreamReceiverController::OnRtpPacket(const RtpPacketReceived& packet) {
rtc::CritScope cs(&lock_);
return demuxer_.OnRtpPacket(packet);
}
// 下面掛 RtpVideoStreamReceiver 到 demuxer_, 具體過程如下
14.1
./video/video_receive_stream.cc
VideoReceiveStream::VideoReceiveStream(
TaskQueueFactory* task_queue_factory,
RtpStreamReceiverControllerInterface* receiver_controller,
int num_cpu_cores,
PacketRouter* packet_router,
VideoReceiveStream::Config config,
ProcessThread* process_thread,
CallStats* call_stats,
Clock* clock,
VCMTiming* timing)
media_receiver_ = receiver_controller->CreateReceiver(config_.rtp.remote_ssrc, &rtp_video_stream_receiver_);
rtp_video_stream_receiver_ 就是這個類,在 VideoReceiveStream.h
RtpVideoStreamReceiver rtp_video_stream_receiver_;
14.2
std::unique_ptr<RtpStreamReceiverInterface>
RtpStreamReceiverController::CreateReceiver(uint32_t ssrc,
RtpPacketSinkInterface* sink) {
return std::make_unique<Receiver>(this, ssrc, sink);
}
14.3
RtpStreamReceiverController::Receiver::Receiver(
RtpStreamReceiverController* controller,
uint32_t ssrc,
RtpPacketSinkInterface* sink)
: controller_(controller), sink_(sink) {
const bool sink_added = controller_->AddSink(ssrc, sink_);
if (!sink_added) {
RTC_LOG(LS_ERROR)
<< "RtpStreamReceiverController::Receiver::Receiver: Sink "
<< "could not be added for SSRC=" << ssrc << ".";
}
}
14.4 這個地方把上面的 rtp_video_stream_receiver_ 加到 RtpStreamReceiverController 的 demuxer_ 了,
也就是說 demuxer_.OnRtpPacket(packet) 就會調用 RtpVideoStreamReceiver::OnRtpPacket
bool RtpStreamReceiverController::AddSink(uint32_t ssrc,
RtpPacketSinkInterface* sink) {
rtc::CritScope cs(&lock_);
return demuxer_.AddSink(ssrc, sink);
}
15.
./video/rtp_video_stream_receiver.cc
void RtpVideoStreamReceiver::OnRtpPacket(const RtpPacketReceived& packet)
ReceivePacket(packet);
void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet)
// NAT 保活包
if (packet.payload_size() == 0) {
// Padding or keep-alive packet.
// TODO(nisse): Could drop empty packets earlier, but need to figure out how
// they should be counted in stats.
NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
return;
}
OnReceivedPayloadData(
rtc::MakeArrayView(parsed_payload.payload, parsed_payload.payload_length),
packet, parsed_payload.video);
void RtpVideoStreamReceiver::OnReceivedPayloadData(
rtc::ArrayView<const uint8_t> codec_payload,
const RtpPacketReceived& rtp_packet,
const RTPVideoHeader& video)
// 丟包處理
if (loss_notification_controller_) {
if (rtp_packet.recovered()) {
// TODO(bugs.webrtc.org/10336): Implement support for reordering.
RTC_LOG(LS_INFO)
<< "LossNotificationController does not support reordering.";
} else if (!packet.generic_descriptor) {
RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
"frame descriptor, but it is missing.";
} else {
loss_notification_controller_->OnReceivedPacket(
rtp_packet.SequenceNumber(), *packet.generic_descriptor);
}
}
// NACK 處理
if (nack_module_) {
const bool is_keyframe =
video_header.is_first_packet_in_frame &&
video_header.frame_type == VideoFrameType::kVideoFrameKey;
packet.times_nacked = nack_module_->OnReceivedPacket(
rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
} else {
packet.times_nacked = -1;
}
rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
frame_counter_.Add(packet.timestamp);
OnInsertedPacket(packet_buffer_.InsertPacket(&packet));
void RtpVideoStreamReceiver::OnInsertedPacket(
video_coding::PacketBuffer::InsertResult result) {
for (std::unique_ptr<video_coding::RtpFrameObject>& frame : result.frames) {
OnAssembledFrame(std::move(frame));
}
if (result.buffer_cleared) {
RequestKeyFrame();
}
}
void RtpVideoStreamReceiver::OnAssembledFrame(
std::unique_ptr<video_coding::RtpFrameObject> frame)
buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
// buffered_frame_decryptor_ 的產生過程
buffered_frame_decryptor_ = std::make_unique<BufferedFrameDecryptor>(this, this);
if (frame_decryptor != nullptr) {
buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
}
16.
./video/buffered_frame_decryptor.cc
void BufferedFrameDecryptor::ManageEncryptedFrame(
std::unique_ptr<video_coding::RtpFrameObject> encrypted_frame) {
// 流程 16.1
switch (DecryptFrame(encrypted_frame.get())) {
case FrameDecision::kStash:
if (stashed_frames_.size() >= kMaxStashedFrames) {
RTC_LOG(LS_WARNING) << "Encrypted frame stash full poping oldest item.";
stashed_frames_.pop_front();
}
stashed_frames_.push_back(std::move(encrypted_frame));
break;
case FrameDecision::kDecrypted:
RetryStashedFrames();
// 流程 16.2
decrypted_frame_callback_->OnDecryptedFrame(std::move(encrypted_frame));
break;
case FrameDecision::kDrop:
break;
}
}
16.1
BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
video_coding::RtpFrameObject* frame)
// Attempt to decrypt the video frame.
const FrameDecryptorInterface::Result decrypt_result =
frame_decryptor_->Decrypt(cricket::MEDIA_TYPE_VIDEO, /*csrcs=*/{},
additional_data, *frame,
inline_decrypted_bitstream);
16.2
void RtpVideoStreamReceiver::OnDecryptedFrame(
std::unique_ptr<video_coding::RtpFrameObject> frame) {
rtc::CritScope lock(&reference_finder_lock_);
reference_finder_->ManageFrame(std::move(frame));
// reference_finder_ 的創建過程,其實就是 RtpFrameReferenceFinder
reference_finder_ = std::make_unique<video_coding::RtpFrameReferenceFinder>(this);
}
17.
./modules/video_coding/rtp_frame_reference_finder.cc
void RtpFrameReferenceFinder::ManageFrame(std::unique_ptr<RtpFrameObject> frame)
// 流程 17.1
FrameDecision decision = ManageFrameInternal(frame.get());
switch (decision) {
case kStash:
if (stashed_frames_.size() > kMaxStashedFrames)
stashed_frames_.pop_back();
stashed_frames_.push_front(std::move(frame));
break;
case kHandOff:
// 流程 17.2
HandOffFrame(std::move(frame));
RetryStashedFrames();
break;
case kDrop:
break;
}
//--------------------------- ManageFrameInternal 流程分析 ---------------------------
17.1.1
RtpFrameReferenceFinder::FrameDecision
RtpFrameReferenceFinder::ManageFrameInternal(RtpFrameObject* frame) {
absl::optional<RtpGenericFrameDescriptor> generic_descriptor =
frame->GetGenericFrameDescriptor();
if (generic_descriptor) {
return ManageFrameGeneric(frame, *generic_descriptor);
}
switch (frame->codec_type()) {
case kVideoCodecVP8:
return ManageFrameVp8(frame);
case kVideoCodecVP9:
return ManageFrameVp9(frame);
case kVideoCodecH264:
return ManageFrameH264(frame);
default: {
// Use 15 first bits of frame ID as picture ID if available.
const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
int picture_id = kNoPictureId;
if (video_header.generic)
picture_id = video_header.generic->frame_id & 0x7fff;
return ManageFramePidOrSeqNum(frame, picture_id);
}
}
}
17.1.2
RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameH264(
RtpFrameObject* frame)
UpdateDataH264(frame, unwrapped_tl0, tid);
17.1.3
void RtpFrameReferenceFinder::UpdateDataH264(RtpFrameObject* frame,
int64_t unwrapped_tl0,
uint8_t temporal_idx)
UpdateLayerInfoH264(frame, unwrapped_tl0, temporal_idx);
//--------------------------------- HandOffFrame 流程分析 -----------------------------------------
17.2.1
void RtpFrameReferenceFinder::HandOffFrame(std::unique_ptr<RtpFrameObject> frame)
frame_callback_->OnCompleteFrame(std::move(frame));
// frame_callback_ 就是 RtpVideoStreamReceiver,參見上面的分析
reference_finder_ = std::make_unique<video_coding::RtpFrameReferenceFinder>(this);
17.2.2
void RtpVideoStreamReceiver::OnCompleteFrame(
std::unique_ptr<video_coding::EncodedFrame> frame) {
{
rtc::CritScope lock(&last_seq_num_cs_);
video_coding::RtpFrameObject* rtp_frame =
static_cast<video_coding::RtpFrameObject*>(frame.get());
last_seq_num_for_pic_id_[rtp_frame->id.picture_id] = rtp_frame->last_seq_num();
}
last_completed_picture_id_ =
std::max(last_completed_picture_id_, frame->id.picture_id);
complete_frame_callback_->OnCompleteFrame(std::move(frame));
}
17.2.3
void VideoReceiveStream::OnCompleteFrame(std::unique_ptr<video_coding::EncodedFrame> frame)
int64_t last_continuous_pid = frame_buffer_->InsertFrame(std::move(frame));
if (last_continuous_pid != -1)
rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid);
// VideoReceiveStream 構造函數中創建 frame_buffer_
frame_buffer_.reset(new video_coding::FrameBuffer(clock_, timing_.get(), &stats_proxy_));
17.2.4 這個就是 jitterbuffer 了,到此爲止。後續的解碼、渲染是另外一個流程了,音頻接收流程基本類似於這個。
./modules/video_coding/frame_buffer2.cc
int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame)
// 整個視頻的接收到此分析完畢!!!!!!!