Analysis of WebRTC's socket mechanism, the STUN send/receive process, and the candidate generation flow

----------------------------------------------------------------------------------------------------------------------------------------

Set up an rtmpd server in one minute: https://blog.csdn.net/freeabc/article/details/102880984

Software download: http://www.qiyicc.com/download/rtmpd.rar

GitHub: https://github.com/superconvert/smart_rtmpd

-----------------------------------------------------------------------------------------------------------------------------------------

Analysis of WebRTC's socket mechanism, the STUN send/receive process, and the candidate generation flow

//*******************************************************************************************
//
// WebRTC creates sockets in many places, and they all go through the factory class
// BasicPacketSocketFactory. The short section below analyzes how BasicPacketSocketFactory
// is created, plus part of the flow of the sockets it manages.
//
//*******************************************************************************************
    AsyncPacketSocket* BasicPacketSocketFactory::CreateUdpSocket(
        const SocketAddress& address,
        uint16_t min_port,
        uint16_t max_port) {
      // UDP sockets are simple.
      // See SocketDispatcher below
      AsyncSocket* socket =
          socket_factory()->CreateAsyncSocket(address.family(), SOCK_DGRAM);
      if (!socket) {
        return NULL;
      }
      //----------------------------------------------------------------------------
      // BindSocket eventually calls the system bind()
      //----------------------------------------------------------------------------
      if (BindSocket(socket, address, min_port, max_port) < 0) {
        RTC_LOG(LS_ERROR) << "UDP bind failed with error " << socket->GetError();
        delete socket;
        return NULL;
      }
      
      //----------------------------------------------------------------------------------------
      // The AsyncUDPSocket constructor hooks the read/write events to AsyncUDPSocket::OnReadEvent and AsyncUDPSocket::OnWriteEvent
      //----------------------------------------------------------------------------------------  
      return new AsyncUDPSocket(socket);
    }
    
    1. Creating BasicPacketSocketFactory
    ./pc/peer_connection_factory.cc
    BasicPacketSocketFactory is created in PeerConnectionFactory::Initialize():
    default_socket_factory_.reset(new rtc::BasicPacketSocketFactory(network_thread_));     
    
    2.
    ./sdk/android/src/jni/pc/peer_connection_factory.cc
    network_thread_ is created inside the CreatePeerConnectionFactoryForJava entry point:
    std::unique_ptr<rtc::Thread> network_thread = rtc::Thread::CreateWithSocketServer();    
    which is just this:
    std::unique_ptr<Thread> Thread::CreateWithSocketServer() {
        return std::unique_ptr<Thread>(new Thread(SocketServer::CreateDefault()));
    }
    
    which in turn creates a PhysicalSocketServer:
    std::unique_ptr<SocketServer> SocketServer::CreateDefault() {
    #if defined(__native_client__)
        return std::unique_ptr<SocketServer>(new rtc::NullSocketServer);
    #else
        return std::unique_ptr<SocketServer>(new rtc::PhysicalSocketServer);
    #endif
    }
    
    Thread is declared as class RTC_LOCKABLE RTC_EXPORT Thread : public MessageQueue, public webrtc::TaskQueueBase.
    The constructor Thread(SocketServer* ss) hands ss to the base class MessageQueue, which exposes it through its socketserver() accessor:
    SocketServer* MessageQueue::socketserver() {
        return ss_;
    }
    
    So the socket_factory()->CreateAsyncSocket call above actually lands in
    ./rtc_base/physical_socket_server.cc
    AsyncSocket* PhysicalSocketServer::CreateAsyncSocket(int family, int type) {        
        SocketDispatcher* dispatcher = new SocketDispatcher(this);
        // Creates the underlying socket via PhysicalSocket::Create
        if (dispatcher->Create(family, type)) {
            return dispatcher;
        } else {
            delete dispatcher;
            return nullptr;
        }
    }
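    
    To make the pieces above concrete, here is a minimal usage sketch (not taken from the WebRTC sources;
    header paths and exact signatures may differ between revisions): create a network thread that owns a
    socket server, build a BasicPacketSocketFactory on it, and allocate a UDP socket through it on that thread.
    
    #include <memory>
    
    #include "p2p/base/basic_packet_socket_factory.h"
    #include "rtc_base/async_packet_socket.h"
    #include "rtc_base/logging.h"
    #include "rtc_base/socket_address.h"
    #include "rtc_base/thread.h"
    
    void CreateUdpSocketOnNetworkThread() {
      // Thread::CreateWithSocketServer() gives the thread a PhysicalSocketServer
      // (via SocketServer::CreateDefault()), exactly as in the flow above.
      std::unique_ptr<rtc::Thread> network_thread =
          rtc::Thread::CreateWithSocketServer();
      network_thread->Start();
    
      rtc::BasicPacketSocketFactory factory(network_thread.get());
    
      // CreateUdpSocket has to run on the network thread, which is also where
      // PhysicalSocketServer::Wait() will later dispatch its read events.
      network_thread->Invoke<void>(RTC_FROM_HERE, [&factory] {
        std::unique_ptr<rtc::AsyncPacketSocket> socket(factory.CreateUdpSocket(
            rtc::SocketAddress("0.0.0.0", 0), /*min_port=*/0, /*max_port=*/0));
        if (socket) {
          RTC_LOG(LS_INFO) << "Bound UDP socket: "
                           << socket->GetLocalAddress().ToString();
        }
        // |socket| is destroyed here; a real caller would keep it alive and
        // connect its SignalReadPacket, as UDPPort::Init() does below.
      });
    
      network_thread->Stop();
    }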
    
//******************************************************************************
//
// The following section explains how a socket receives data; it is independent of the flow above.
//
//******************************************************************************

    The flow above contains this function call:
    std::unique_ptr<Thread> Thread::CreateWithSocketServer() {
        return std::unique_ptr<Thread>(new Thread(SocketServer::CreateDefault()));
    }
    It creates a thread that owns the socket server; that thread's Run() looks like this:
    
    void Thread::Run() {
        ProcessMessages(kForever);
    }
    
    // This loop keeps Get()ing the next message and dispatching it
    bool Thread::ProcessMessages(int cmsLoop) {
      // Using ProcessMessages with a custom clock for testing and a time greater
      // than 0 doesn't work, since it's not guaranteed to advance the custom
      // clock's time, and may get stuck in an infinite loop.
      RTC_DCHECK(GetClockForTesting() == nullptr || cmsLoop == 0 ||
                 cmsLoop == kForever);
      int64_t msEnd = (kForever == cmsLoop) ? 0 : TimeAfter(cmsLoop);
      int cmsNext = cmsLoop;

      while (true) {
    #if defined(WEBRTC_MAC)
        ScopedAutoReleasePool pool;
    #endif
        Message msg;
        if (!Get(&msg, cmsNext))
          return !IsQuitting();
        Dispatch(&msg);

        if (cmsLoop != kForever) {
          cmsNext = static_cast<int>(TimeUntil(msEnd));
          if (cmsNext < 0)
            return true;
        }
      }
    }
    
    // This is really the base-class MessageQueue interface
    bool MessageQueue::Get(Message* pmsg, int cmsWait, bool process_io)
        // ss_ here is the SocketServer from SocketServer::CreateDefault(), i.e. this calls PhysicalSocketServer::Wait
        if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
        
    This is where all socket activity is waited on. There are several platform implementations (Windows, Linux); pick one to analyze:
    ./rtc_base/physical_socket_server.cc
    bool PhysicalSocketServer::Wait(int cmsWait, bool process_io)
        return WaitEpoll(cmsWait);
        
    bool PhysicalSocketServer::WaitEpoll(int cmsWait)
        ProcessEvents(pdispatcher, readable, writable, check_error);
        
    static void ProcessEvents(Dispatcher* dispatcher, bool readable, bool writable, bool check_error) 
        // dispatcher here is the SocketDispatcher
        dispatcher->OnEvent(ff, errcode);
        
    void SocketDispatcher::OnEvent(uint32_t ff, int err)
        For a read event (assuming UDP here):
        SignalReadEvent(this);
        
    ./rtc_base/async_udp_socket.cc
    void AsyncUDPSocket::OnReadEvent(AsyncSocket* socket)
        SignalReadPacket(this, buf_, static_cast<size_t>(len), remote_addr,
                   (timestamp > -1 ? timestamp : TimeMicros()));
                   
    
    ./p2p/base/stun_port.cc 
    void UDPPort::OnReadPacket(rtc::AsyncPacketSocket* socket,
        const char* data,
        size_t size,
        const rtc::SocketAddress& remote_addr,
        const int64_t& packet_time_us)
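        
    To see how SignalReadPacket reaches a consumer, here is a small illustrative sketch (class and handler
    names are invented; it mirrors the UDPPort::OnReadPacket hookup shown in 7.4.2 below, with the packet
    time passed as const int64_t& as in the revision analyzed here):
    
    #include "rtc_base/async_packet_socket.h"
    #include "rtc_base/logging.h"
    #include "rtc_base/third_party/sigslot/sigslot.h"
    
    class UdpPacketPrinter : public sigslot::has_slots<> {
     public:
      explicit UdpPacketPrinter(rtc::AsyncPacketSocket* socket) {
        // Same pattern as socket_->SignalReadPacket.connect(...) in UDPPort::Init()
        // and udp_socket_->SignalReadPacket.connect(...) in AllocationSequence::Init().
        socket->SignalReadPacket.connect(this, &UdpPacketPrinter::OnReadPacket);
      }
    
     private:
      void OnReadPacket(rtc::AsyncPacketSocket* socket,
                        const char* data,
                        size_t size,
                        const rtc::SocketAddress& remote_addr,
                        const int64_t& packet_time_us) {
        RTC_LOG(LS_INFO) << "Received " << size << " bytes from "
                         << remote_addr.ToString();
      }
    };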
        
//******************************************************************************
//
// The section below analyzes the WebRTC STUN flow.
//
//******************************************************************************
    
1. The analysis starts here; see the createPeerConnection flow for how this gets invoked
JsepTransportController::MaybeStartGathering

2. This ice_transport is the P2PTransportChannel object
dtls->ice_transport()->MaybeStartGathering();

3. The first-time creation flow
./p2p/base/p2p_transport_channel.cc
P2PTransportChannel::MaybeStartGathering 
    //------------------------------------------------------------
    // Create a PortAllocatorSession and hook its signals up to P2PTransportChannel
    //------------------------------------------------------------
    AddAllocatorSession(allocator_->CreateSession(
          transport_name(), component(), ice_parameters_.ufrag,
          ice_parameters_.pwd));
    // Then call into the PortAllocatorSession interface
    allocator_sessions_.back()->StartGettingPorts();
  
    3.1
    This allocator_ comes from the function below; as we can see, it is JsepTransportController's member port_allocator_
    ./pc/jsep_transport_controller.cc
    rtc::scoped_refptr<webrtc::IceTransportInterface>
    JsepTransportController::CreateIceTransport(const std::string& transport_name, bool rtcp) {
        int component = rtcp ? cricket::ICE_CANDIDATE_COMPONENT_RTCP : cricket::ICE_CANDIDATE_COMPONENT_RTP;
    
        IceTransportInit init;
        init.set_port_allocator(port_allocator_);
        init.set_async_resolver_factory(async_resolver_factory_);
        init.set_event_log(config_.event_log);
        return config_.ice_transport_factory->CreateIceTransport(transport_name, component, std::move(init));
    }
    
    ./api/ice_transport_factory.cc
    rtc::scoped_refptr<IceTransportInterface> CreateIceTransport(IceTransportInit init) {
        return new rtc::RefCountedObject<IceTransportWithTransportChannel>(
            std::make_unique<cricket::P2PTransportChannel>(
            "", 0, init.port_allocator(), init.async_resolver_factory(), init.event_log()));
    }
    
    3.2
    Tracing port_allocator_: it is passed in while JsepTransportController is constructed. Looking at that
    initialization, it turns out to be PeerConnection's own port_allocator_ object.
    
    ./pc/peer_connection.cc
    bool PeerConnection::Initialize(const PeerConnectionInterface::RTCConfiguration& configuration,    
        PeerConnectionDependencies dependencies)
        
      // Passed in through the dependencies ...
      port_allocator_ = std::move(dependencies.allocator);
      ... ...
      // Handed to JsepTransportController
      transport_controller_.reset(new JsepTransportController(
      signaling_thread(), network_thread(), port_allocator_.get(),
      async_resolver_factory_.get(), config));
      
    3.3
    Now analyze how port_allocator_ is produced while PeerConnection is created
    ./pc/peer_connection_factory.cc
    rtc::scoped_refptr<PeerConnectionInterface>
    PeerConnectionFactory::CreatePeerConnection(const PeerConnectionInterface::RTCConfiguration& configuration,
        PeerConnectionDependencies dependencies)
        
        if (!dependencies.allocator) {
            rtc::PacketSocketFactory* packet_socket_factory;
            if (dependencies.packet_socket_factory)
              packet_socket_factory = dependencies.packet_socket_factory.get();
            else
              // This is the BasicPacketSocketFactory; see the analysis above
              packet_socket_factory = default_socket_factory_.get();

            network_thread_->Invoke<void>(RTC_FROM_HERE, [this, &configuration,
                &dependencies,
                &packet_socket_factory]() {
                //------------------------------------------------------
                // This is the port_allocator_ we are tracking!
                //------------------------------------------------------
                dependencies.allocator = std::make_unique<cricket::BasicPortAllocator>(
                    default_network_manager_.get(), packet_socket_factory, configuration.turn_customizer);
            });
        }
        
        rtc::scoped_refptr<PeerConnection> pc(new rtc::RefCountedObject<PeerConnection>(this, std::move(event_log),
            std::move(call)));
        ActionsBeforeInitializeForTesting(pc);
        if (!pc->Initialize(configuration, std::move(dependencies))) {
            return nullptr;
        }
        
    The function above is invoked by the JNI entry point below; dependencies.allocator is null there, so port_allocator_ is allocated in the step above
    ./sdk/android/src/jni/pc/peer_connection_factory.cc
    static jlong JNI_PeerConnectionFactory_CreatePeerConnection(
        JNIEnv* jni,
        jlong factory,
        const JavaParamRef<jobject>& j_rtc_config,
        const JavaParamRef<jobject>& j_constraints,
        jlong observer_p,
        const JavaParamRef<jobject>& j_sslCertificateVerifier)
        
        PeerConnectionDependencies peer_connection_dependencies(observer.get());
        if (!j_sslCertificateVerifier.is_null()) {
            peer_connection_dependencies.tls_cert_verifier = std::make_unique<SSLCertificateVerifierWrapper>(
                jni, j_sslCertificateVerifier);
        }
        rtc::scoped_refptr<PeerConnectionInterface> pc =
            PeerConnectionFactoryFromJava(factory)->CreatePeerConnection(rtc_config, 
            std::move(peer_connection_dependencies));
    
    
    3.4 Next, BasicPortAllocator's CreateSession interface
    ./p2p/base/port_allocator.cc
    std::unique_ptr<PortAllocatorSession> PortAllocator::CreateSession(
        const std::string& content_name,
        int component,
        const std::string& ice_ufrag,
        const std::string& ice_pwd) {
      CheckRunOnValidThreadAndInitialized();
      auto session = std::unique_ptr<PortAllocatorSession>(
          CreateSessionInternal(content_name, component, ice_ufrag, ice_pwd));
      session->SetCandidateFilter(candidate_filter());
      return session;
    }
    
    ./p2p/client/basic_port_allocator.cc    
    PortAllocatorSession* BasicPortAllocator::CreateSessionInternal(const std::string& content_name,
        int component, const std::string& ice_ufrag, const std::string& ice_pwd) {
        CheckRunOnValidThreadAndInitialized();
        PortAllocatorSession* session = new BasicPortAllocatorSession(this, content_name, component, ice_ufrag, ice_pwd);
        session->SignalIceRegathering.connect(this, &BasicPortAllocator::OnIceRegathering);
        return session;
    }
    
4.
./p2p/client/basic_port_allocator.cc
void BasicPortAllocatorSession::StartGettingPorts() {
  RTC_DCHECK_RUN_ON(network_thread_);
  state_ = SessionState::GATHERING;
  if (!socket_factory_) {
    owned_socket_factory_.reset(
        new rtc::BasicPacketSocketFactory(network_thread_));
    socket_factory_ = owned_socket_factory_.get();
  }

  network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_START);

  RTC_LOG(LS_INFO) << "Start getting ports with turn_port_prune_policy "
                   << turn_port_prune_policy_;
}

5.
void BasicPortAllocatorSession::OnMessage(rtc::Message* message) {
  switch (message->message_id) {
    case MSG_CONFIG_START:
      GetPortConfigurations();
      break;
    case MSG_CONFIG_READY:
      OnConfigReady(static_cast<PortConfiguration*>(message->pdata));
      break;
    case MSG_ALLOCATE:
      OnAllocate();
      break;
    case MSG_SEQUENCEOBJECTS_CREATED:
      OnAllocationSequenceObjectsCreated();
      break;
    case MSG_CONFIG_STOP:
      OnConfigStop();
      break;
    default:
      RTC_NOTREACHED();
  }
}

void BasicPortAllocatorSession::GetPortConfigurations() {
  RTC_DCHECK_RUN_ON(network_thread_);

  PortConfiguration* config =
      new PortConfiguration(allocator_->stun_servers(), username(), password());

  for (const RelayServerConfig& turn_server : allocator_->turn_servers()) {
    config->AddRelay(turn_server);
  }
  ConfigReady(config);
}

void BasicPortAllocatorSession::ConfigReady(PortConfiguration* config) {
  RTC_DCHECK_RUN_ON(network_thread_);
  network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_READY, config);
}

6. 
void BasicPortAllocatorSession::OnConfigReady(PortConfiguration* config) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (config) {
    configs_.push_back(config);
  }

  AllocatePorts();
}

void BasicPortAllocatorSession::AllocatePorts() {
  RTC_DCHECK_RUN_ON(network_thread_);
  network_thread_->Post(RTC_FROM_HERE, this, MSG_ALLOCATE);
}
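
Steps 4-6 above all follow the Post/OnMessage pattern described in the socket section: each stage posts a
message to the network thread, and ProcessMessages() dispatches it back into BasicPortAllocatorSession::OnMessage()
on that thread. A minimal standalone sketch of the same pattern (class name and message IDs are invented;
header paths may differ between WebRTC revisions):

#include "rtc_base/logging.h"
#include "rtc_base/message_handler.h"
#include "rtc_base/thread.h"

class TwoPhaseTask : public rtc::MessageHandler {
 public:
  enum { MSG_PHASE_ONE = 1, MSG_PHASE_TWO };

  void Start(rtc::Thread* thread) {
    thread_ = thread;
    // Queued on |thread|; Dispatch() inside ProcessMessages() later calls
    // OnMessage() on that same thread.
    thread_->Post(RTC_FROM_HERE, this, MSG_PHASE_ONE);
  }

  void OnMessage(rtc::Message* msg) override {
    switch (msg->message_id) {
      case MSG_PHASE_ONE:
        RTC_LOG(LS_INFO) << "phase one";
        thread_->Post(RTC_FROM_HERE, this, MSG_PHASE_TWO);
        break;
      case MSG_PHASE_TWO:
        RTC_LOG(LS_INFO) << "phase two";
        break;
    }
  }

 private:
  rtc::Thread* thread_ = nullptr;
};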

7.
void BasicPortAllocatorSession::OnAllocate() {
  RTC_DCHECK_RUN_ON(network_thread_);

  if (network_manager_started_ && !IsStopped()) {
    bool disable_equivalent_phases = true;
    DoAllocate(disable_equivalent_phases);
  }

  allocation_started_ = true;
}

void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent)

    AllocationSequence* sequence = new AllocationSequence(this, networks[i], config, sequence_flags);
    sequence->SignalPortAllocationComplete.connect(
        this, &BasicPortAllocatorSession::OnPortAllocationComplete);
    sequence->Init();
    sequence->Start();
    sequences_.push_back(sequence);
    done_signal_needed = true;
    
    network_thread_->Post(RTC_FROM_HERE, this, MSG_SEQUENCEOBJECTS_CREATED);
    
    7.1 sequence->Init() creates a UDP socket and hooks up the read callback
    ./p2p/client/basic_port_allocator.cc
    void AllocationSequence::Init() {
        if (IsFlagSet(PORTALLOCATOR_ENABLE_SHARED_SOCKET)) {
            udp_socket_.reset(session_->socket_factory()->CreateUdpSocket(
            rtc::SocketAddress(network_->GetBestIP(), 0),
            session_->allocator()->min_port(), session_->allocator()->max_port()));
            if (udp_socket_) {
                udp_socket_->SignalReadPacket.connect(this,
                    &AllocationSequence::OnReadPacket);
            }
            // Continuing if |udp_socket_| is NULL, as local TCP and RelayPort using TCP
            // are next available options to setup a communication channel.
        }
    }
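    
    Whether Init() creates this shared socket at all is controlled by the PORTALLOCATOR_ENABLE_SHARED_SOCKET
    flag on the allocator (in the revision analyzed here, PeerConnection enables it by default when it
    initializes the allocator). A hedged sketch of setting it explicitly on a cricket::PortAllocator; the
    helper function name is invented:
    
    #include "p2p/base/port_allocator.h"
    
    void EnableSharedUdpSocket(cricket::PortAllocator* allocator) {
      // With this flag set, AllocationSequence::Init() creates one UDP socket that
      // the UDP-based ports can share; without it, each port creates its own socket.
      allocator->set_flags(allocator->flags() |
                           cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET);
    }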
    
    7.2 sequence->Start()
        session_->network_thread()->Post(RTC_FROM_HERE, this, MSG_ALLOCATION_PHASE);
    
    7.3
    void AllocationSequence::OnMessage(rtc::Message* msg) {
        RTC_DCHECK(rtc::Thread::Current() == session_->network_thread());
        RTC_DCHECK(msg->message_id == MSG_ALLOCATION_PHASE);

        const char* const PHASE_NAMES[kNumPhases] = {"Udp", "Relay", "Tcp"};

        // Perform all of the phases in the current step.
        RTC_LOG(LS_INFO) << network_->ToString() << ": Allocation Phase=" << PHASE_NAMES[phase_];

        switch (phase_) {
            case PHASE_UDP:
            CreateUDPPorts();
            CreateStunPorts();
            break;

        case PHASE_RELAY:
            CreateRelayPorts();
            break;

        case PHASE_TCP:
            CreateTCPPorts();
            state_ = kCompleted;
            break;

        default:
            RTC_NOTREACHED();
        }

        if (state() == kRunning) {
            ++phase_;
            session_->network_thread()->PostDelayed(RTC_FROM_HERE,
                session_->allocator()->step_delay(),
                this, MSG_ALLOCATION_PHASE);
        } else {
            // If all phases in AllocationSequence are completed, no allocation
            // steps needed further. Canceling  pending signal.
            session_->network_thread()->Clear(this, MSG_ALLOCATION_PHASE);
            SignalPortAllocationComplete(this);
        }
    }
    
    7.4
    void AllocationSequence::CreateUDPPorts()
        // Passes along the udp_socket_ created above (when the shared-socket flag is set)
        port = UDPPort::Create(
        session_->network_thread(), session_->socket_factory(), network_,
        session_->allocator()->min_port(), session_->allocator()->max_port(),
        session_->username(), session_->password(),
        session_->allocator()->origin(), emit_local_candidate_for_anyaddress,
        session_->allocator()->stun_candidate_keepalive_interval());
    
        //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        // See AddAllocatedPort below; it mainly hooks up OnCandidateReady and OnPortComplete
        //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        session_->AddAllocatedPort(port.release(), this, true);
    
        7.4.1
        ./p2p/base/stun_port.h
        static std::unique_ptr<UDPPort> Create(
            rtc::Thread* thread,
            rtc::PacketSocketFactory* factory,
            rtc::Network* network,
            rtc::AsyncPacketSocket* socket,
            const std::string& username,
            const std::string& password,
            const std::string& origin,
            bool emit_local_for_anyaddress,
            absl::optional<int> stun_keepalive_interval) {
            // Using `new` to access a non-public constructor.
            auto port = absl::WrapUnique(new UDPPort(thread, factory, network, socket,
                username, password, origin,
                emit_local_for_anyaddress));
            port->set_stun_keepalive_delay(stun_keepalive_interval);
            if (!port->Init()) {
                return nullptr;
            }
            return port;
        }
    
        7.4.2
        bool UDPPort::Init()
            stun_keepalive_lifetime_ = GetStunKeepaliveLifetime();
            if (!SharedSocket()) {
                RTC_DCHECK(socket_ == nullptr);
                //---------------------------------------------------------------------------------
                // socket_factory() here is the BasicPacketSocketFactory analyzed above; it creates and binds the socket
                //---------------------------------------------------------------------------------
                socket_ = socket_factory()->CreateUdpSocket(
                    rtc::SocketAddress(Network()->GetBestIP(), 0), min_port(), max_port());
                if (!socket_) {
                    RTC_LOG(LS_WARNING) << ToString() << ": UDP socket creation failed";
                    return false;
                }
                socket_->SignalReadPacket.connect(this, &UDPPort::OnReadPacket);
            }
            socket_->SignalSentPacket.connect(this, &UDPPort::OnSentPacket);
            socket_->SignalReadyToSend.connect(this, &UDPPort::OnReadyToSend);
            socket_->SignalAddressReady.connect(this, &UDPPort::OnLocalAddressReady);
            requests_.SignalSendPacket.connect(this, &UDPPort::OnSendPacket);
        
    7.5
    void BasicPortAllocatorSession::AddAllocatedPort(Port* port,
                                                 AllocationSequence* seq,
                                                 bool prepare_address) {
        RTC_DCHECK_RUN_ON(network_thread_);
        if (!port)
            return;

        RTC_LOG(LS_INFO) << "Adding allocated port for " << content_name();
        port->set_content_name(content_name());
        port->set_component(component());
        port->set_generation(generation());
        if (allocator_->proxy().type != rtc::PROXY_NONE)
            port->set_proxy(allocator_->user_agent(), allocator_->proxy());
        port->set_send_retransmit_count_attribute(
            (flags() & PORTALLOCATOR_ENABLE_STUN_RETRANSMIT_ATTRIBUTE) != 0);

        PortData data(port, seq);
        ports_.push_back(data);

        port->SignalCandidateReady.connect(
            this, &BasicPortAllocatorSession::OnCandidateReady);
        port->SignalCandidateError.connect(
            this, &BasicPortAllocatorSession::OnCandidateError);
        port->SignalPortComplete.connect(this,
            &BasicPortAllocatorSession::OnPortComplete);
        port->SignalDestroyed.connect(this,
            &BasicPortAllocatorSession::OnPortDestroyed);
        port->SignalPortError.connect(this, &BasicPortAllocatorSession::OnPortError);
            RTC_LOG(LS_INFO) << port->ToString() << ": Added port to allocator";

        if (prepare_address)
            port->PrepareAddress();
    }
    
    7.6
    void UDPPort::PrepareAddress() {
        RTC_DCHECK(requests_.empty());
        if (socket_->GetState() == rtc::AsyncPacketSocket::STATE_BOUND) {
            OnLocalAddressReady(socket_, socket_->GetLocalAddress());
        }
    }
    
    void UDPPort::OnLocalAddressReady(rtc::AsyncPacketSocket* socket,
                                  const rtc::SocketAddress& address) {
        // When adapter enumeration is disabled and binding to the any address, the
        // default local address will be issued as a candidate instead if
        // |emit_local_for_anyaddress| is true. This is to allow connectivity for
        // applications which absolutely requires a HOST candidate.
        rtc::SocketAddress addr = address;

        // If MaybeSetDefaultLocalAddress fails, we keep the "any" IP so that at
        // least the port is listening.
        MaybeSetDefaultLocalAddress(&addr);

        AddAddress(addr, addr, rtc::SocketAddress(), UDP_PROTOCOL_NAME, "", "",
                 LOCAL_PORT_TYPE, ICE_TYPE_PREFERENCE_HOST, 0, "", false);
        MaybePrepareStunCandidate();
    }
    
    void UDPPort::MaybePrepareStunCandidate() {
        // Sending binding request to the STUN server if address is available to
        // prepare STUN candidate.
        if (!server_addresses_.empty()) {
            SendStunBindingRequests();
        } else {
            // Port is done allocating candidates.
            MaybeSetPortCompleteOrError();
        }
    }
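    
    The server_addresses_ checked here ultimately come from the STUN servers the application put into
    RTCConfiguration (they flow through allocator_->stun_servers() in GetPortConfigurations above). A brief
    application-side sketch (the STUN URL is only an example):
    
    #include "api/peer_connection_interface.h"
    
    webrtc::PeerConnectionInterface::RTCConfiguration MakeConfig() {
      webrtc::PeerConnectionInterface::RTCConfiguration config;
      webrtc::PeerConnectionInterface::IceServer stun;
      stun.urls.push_back("stun:stun.l.google.com:19302");
      config.servers.push_back(stun);
      return config;
    }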
    
    7.7 This is where the STUN Binding requests are sent to the STUN server(s)
    void UDPPort::SendStunBindingRequests() {
        // We will keep pinging the stun server to make sure our NAT pin-hole stays
        // open until the deadline (specified in SendStunBindingRequest).
        RTC_DCHECK(requests_.empty());

        for (ServerAddresses::const_iterator it = server_addresses_.begin();
            it != server_addresses_.end(); ++it) {
            SendStunBindingRequest(*it);
        }
    }
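    
    The Binding request that StunBindingRequest / StunRequestManager put on the wire is a standard RFC 5389
    STUN message; WebRTC serializes it through cricket::StunMessage rather than by hand. For reference, a
    self-contained sketch of just the 20-byte message header (general STUN knowledge, not WebRTC code):
    
    #include <array>
    #include <cstdint>
    #include <random>
    
    std::array<uint8_t, 20> BuildStunBindingRequestHeader() {
      std::array<uint8_t, 20> h{};
      // Message type: Binding request = 0x0001; message length = 0 (no attributes).
      h[0] = 0x00; h[1] = 0x01;
      h[2] = 0x00; h[3] = 0x00;
      // Magic cookie 0x2112A442 in network byte order.
      h[4] = 0x21; h[5] = 0x12; h[6] = 0xA4; h[7] = 0x42;
      // 96-bit transaction ID, random per request; the server echoes it back, which
      // is how StunRequestManager::CheckResponse matches responses to requests.
      std::random_device rd;
      for (int i = 8; i < 20; ++i) {
        h[i] = static_cast<uint8_t>(rd());
      }
      return h;
    }
    
    The success response carries an XOR-MAPPED-ADDRESS attribute; that reflected address is what
    OnStunBindingRequestSucceeded turns into the srflx candidate in step 7.8 below.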
    
    //--------------------------------------------------------------------------------
    // Analysis of the STUN server response flow, starting from the socket receiving data
    //--------------------------------------------------------------------------------
    All STUN signaling arrives through this path.
    // This chain was already analyzed above
    7.7.1
    bool MessageQueue::Get(Message* pmsg, int cmsWait, bool process_io)
        // ss_ here is the SocketServer from SocketServer::CreateDefault(), i.e. this calls PhysicalSocketServer::Wait
        if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
    7.7.2    
    bool PhysicalSocketServer::Wait(int cmsWait, bool process_io)
        return WaitEpoll(cmsWait);
    7.7.3
    bool PhysicalSocketServer::WaitEpoll(int cmsWait)
        ProcessEvents(pdispatcher, readable, writable, check_error);
    7.7.4
    static void ProcessEvents(Dispatcher* dispatcher,
                          bool readable,
                          bool writable,
                          bool check_error)
        dispatcher->OnEvent(ff, errcode);        
    7.7.5
    ./rtc_base/physical_socket_server.cc
    void SocketDispatcher::OnEvent(uint32_t ff, int err)
        SignalReadEvent(this);
    7.7.6
    ./rtc_base/async_udp_socket.cc
    void AsyncUDPSocket::OnReadEvent(AsyncSocket* socket)
        SignalReadPacket(this, buf_, static_cast<size_t>(len), remote_addr,
                   (timestamp > -1 ? timestamp : TimeMicros()));
    7.7.7
    ./p2p/base/stun_port.cc
    void UDPPort::OnReadPacket(rtc::AsyncPacketSocket* socket,
        const char* data,
        size_t size,
        const rtc::SocketAddress& remote_addr,
        const int64_t& packet_time_us)
        requests_.CheckResponse(data, size);        
    UDPPort::OnReadPacket is connected to AsyncUDPSocket's SignalReadPacket (see 7.4.2).
    
    7.7.8            
    ./p2p/base/stun_request.cc
    bool StunRequestManager::CheckResponse(StunMessage* msg)
        request->OnResponse(msg);
    
    7.7.9
    ./p2p/base/stun_port.cc
    void StunBindingRequest::OnResponse(StunMessage* response)
         port_->OnStunBindingRequestSucceeded(this->Elapsed(), server_addr_, addr);
    
    7.8 On STUN success, control enters
    ./p2p/base/stun_port.cc
    void UDPPort::OnStunBindingRequestSucceeded(
        int rtt_ms,
        const rtc::SocketAddress& stun_server_addr,
        const rtc::SocketAddress& stun_reflected_addr)
        
        AddAddress(stun_reflected_addr, socket_->GetLocalAddress(), related_address,
               UDP_PROTOCOL_NAME, "", "", STUN_PORT_TYPE,
               ICE_TYPE_PREFERENCE_SRFLX, 0, url.str(), false);
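               
    For orientation, after SDP serialization the host candidate from 7.6 and the srflx candidate added here
    typically show up as ICE candidate attributes along these lines (illustrative addresses and values,
    RFC 5245 syntax):
    
        candidate:3027289326 1 udp 2122260223 192.168.1.23 52229 typ host generation 0
        candidate:1299514540 1 udp 1686052607 203.0.113.7 52229 typ srflx raddr 192.168.1.23 rport 52229 generation 0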
               
    7.9        
    void Port::AddAddress(const rtc::SocketAddress& address,
                      const rtc::SocketAddress& base_address,
                      const rtc::SocketAddress& related_address,
                      const std::string& protocol,
                      const std::string& relay_protocol,
                      const std::string& tcptype,
                      const std::string& type,
                      uint32_t type_preference,
                      uint32_t relay_preference,
                      const std::string& url,
                      bool is_final)
                      
                      
        FinishAddingAddress(c, is_final);
        
    7.10
    void Port::FinishAddingAddress(const Candidate& c, bool is_final) {
        candidates_.push_back(c);
        SignalCandidateReady(this, c);

        PostAddAddress(is_final);
    }
    
    7.11
    ./p2p/client/basic_port_allocator.cc
    void BasicPortAllocatorSession::OnCandidateReady(Port* port, const Candidate& c)    
         SignalCandidatesReady(this, candidates);
         
    7.12
    ./p2p/base/p2p_transport_channel.cc
    void P2PTransportChannel::OnCandidatesReady(
        PortAllocatorSession* session,
        const std::vector<Candidate>& candidates) {
        RTC_DCHECK_RUN_ON(network_thread_);
        for (size_t i = 0; i < candidates.size(); ++i) {
            SignalCandidateGathered(this, candidates[i]);
        }
    }
    
    7.13
    ./pc/jsep_transport_controller.cc
    void JsepTransportController::OnTransportCandidateGathered_n(
        cricket::IceTransportInternal* transport,
        const cricket::Candidate& candidate) {
        RTC_DCHECK(network_thread_->IsCurrent());

        // We should never signal peer-reflexive candidates.
        if (candidate.type() == cricket::PRFLX_PORT_TYPE) {
            RTC_NOTREACHED();
            return;
        }
        std::string transport_name = transport->transport_name();
        invoker_.AsyncInvoke<void>(
            RTC_FROM_HERE, signaling_thread_, [this, transport_name, candidate] {
            SignalIceCandidatesGathered(transport_name, {candidate});
        });
    }
    
    7.14
    ./pc/peer_connection.cc
    void PeerConnection::OnTransportControllerCandidatesGathered(
        const std::string& transport_name,
        const cricket::Candidates& candidates) {
        int sdp_mline_index;
        if (!GetLocalCandidateMediaIndex(transport_name, &sdp_mline_index)) {
            RTC_LOG(LS_ERROR)
                << "OnTransportControllerCandidatesGathered: content name "
                << transport_name << " not found";
            return;
        }

        for (cricket::Candidates::const_iterator citer = candidates.begin();
            citer != candidates.end(); ++citer) {
            // Use transport_name as the candidate media id.
            std::unique_ptr<JsepIceCandidate> candidate(
                new JsepIceCandidate(transport_name, sdp_mline_index, *citer));
            if (local_description()) {
                mutable_local_description()->AddCandidate(candidate.get());
            }
            OnIceCandidate(std::move(candidate));
        }
    }
    
    void PeerConnection::OnIceCandidate(
        std::unique_ptr<IceCandidateInterface> candidate) {
      if (IsClosed()) {
        return;
      }
      ReportIceCandidateCollected(candidate->candidate());
      // This calls back into the Java-layer observer, which sends the local candidate to the remote peer
      Observer()->OnIceCandidate(candidate.get());
    }
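    
    On the native side this callback arrives through webrtc::PeerConnectionObserver::OnIceCandidate. A
    hedged sketch of the application-side handling, serializing the candidate for the signaling channel
    (SendToSignaling is a hypothetical application function):
    
    #include <string>
    
    #include "api/jsep.h"  // webrtc::IceCandidateInterface
    #include "rtc_base/logging.h"
    
    // App-defined transport to the remote peer (hypothetical).
    void SendToSignaling(const std::string& mid, int mline_index,
                         const std::string& candidate_sdp);
    
    // Typically called from PeerConnectionObserver::OnIceCandidate().
    void ForwardCandidate(const webrtc::IceCandidateInterface* candidate) {
      std::string sdp;
      if (!candidate->ToString(&sdp)) {
        RTC_LOG(LS_ERROR) << "Failed to serialize candidate";
        return;
      }
      // sdp_mid() / sdp_mline_index() identify the m= section; the remote peer
      // passes all three values back into PeerConnection::AddIceCandidate().
      SendToSignaling(candidate->sdp_mid(), candidate->sdp_mline_index(), sdp);
    }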
        
    
8.
void BasicPortAllocatorSession::OnAllocationSequenceObjectsCreated() {
  RTC_DCHECK_RUN_ON(network_thread_);
  allocation_sequences_created_ = true;
  // Send candidate allocation complete signal if we have no sequences.
  MaybeSignalCandidatesAllocationDone();
}

./p2p/base/p2p_transport_channel.cc
void P2PTransportChannel::OnCandidatesAllocationDone(
    PortAllocatorSession* session) {
  RTC_DCHECK_RUN_ON(network_thread_);
  if (config_.gather_continually()) {
    RTC_LOG(LS_INFO) << "P2PTransportChannel: " << transport_name()
                     << ", component " << component()
                     << " gathering complete, but using continual "
                        "gathering so not changing gathering state.";
    return;
  }
  gathering_state_ = kIceGatheringComplete;
  RTC_LOG(LS_INFO) << "P2PTransportChannel: " << transport_name()
                   << ", component " << component() << " gathering complete";
  SignalGatheringState(this);
}

./pc/jsep_transport_controller.cc
void JsepTransportController::OnTransportGatheringState_n(
    cricket::IceTransportInternal* transport) {
  RTC_DCHECK(network_thread_->IsCurrent());
  UpdateAggregateStates_n();
}
