在Vovida的基礎上實現自己的SIP協議棧(五)

 

在Vovida的基礎上實現自己的SIP協議棧(五)

盧政 2003/08/07

3.2.8.2處理RTP/RTCP包:

  前面說了ResGwDevice::processSessionMsg處理掛在設備處理隊列裏的各個命令,下面我們具體來看各個應用程序的處理過程:

a.處理用戶發出的終端消息,並且打開設備發送媒體包。
ResGwDevice::processSessionMsg( Sptr event ):
// Dispatch one message taken from the device's processing queue to the
// matching audio/signal handler.
void ResGwDevice::processSessionMsg( Sptr event )
{
Sptr msg;
msg.dynamicCast( event );
if( msg != 0 )
{
cpLog( LOG_DEBUG, "Got message type: %d", msg->type );
switch( msg->type )
{
case HardwareSignalType: // state reserved for voicemail; covered in the Feature Server chapter
// ... (elided in this excerpt)
case HardwareAudioType:
switch ((msg->signalOrRequest).request.type)
{
case AudioStart: // open the sound device and establish the RTP/RTCP session
audioStart((msg->signalOrRequest).request);
break;
case AudioStop: // stop the sound device, tear down the session, release its resources
audioStop();
break;
case AudioSuspend:
audioSuspend(); // suspend the device; resources are kept and the RTP session stays up
break;
case AudioResume:
audioResume((msg->signalOrRequest).request); // restart the device
break;
default:
cpLog( LOG_ERR, "Unknown audio request: %d",
(msg->signalOrRequest).request.type );
}
break;
// ... (other message types elided in this excerpt)
}
}
}

b.根據遠端和本地的SDP建立RTP會話(經過簡化):
// Set up (or reuse) the RTP session for this call from the local and remote
// SDP parameters carried in `request`, then (re)open the audio hardware.
// Returns 0 on success.
int SoundCardDevice::audioStart( const HardwareAudioRequest& request )
{
// NOTE(review): deviceMutex is locked/unlocked manually; any early return or
// exception between lock() and unlock() would leave it held — a scoped lock
// would be safer. Confirm against the full source.
deviceMutex.lock();

// create new audioStack for this audio session
// 0 is rtpPayloadPCMU
// last parameter, -1, disables jitter buffer
if( audioStack == 0 )
{
// RTCP ports follow the RFC convention: RTP port + 1 (0 means "unused").
int remoteRtcpPort = (request.remotePort > 0) ? request.remotePort + 1 : 0;
int localRtcpPort = (request.localPort > 0) ? request.localPort + 1 : 0;
cerr << "%%% Remote rtcp port : " << remoteRtcpPort << "\n";
cerr << "%%% Local rtcp port : " << localRtcpPort << "\n\n";
const char* remoteHost = 0;
if ( request.remotePort != 0 )
remoteHost = request.remoteHost;
// Create the RTP session. Parameters: remote host, remote/local RTP ports,
// remote/local RTCP ports, network- and API-side RTP payload types; the
// full source also wires up the send/receive handles and the playout
// buffer (inBuff).
audioStack = new RtpSession( remoteHost, request.remotePort,
request.localPort, remoteRtcpPort,
localRtcpPort, rtpPayloadPCMU,
rtpPayloadPCMU, 0 );
}
else
{
// ... (reuse of an existing session elided in this excerpt)
}
// enable or disable the ringback tone sent to the remote side
if( request.sendRingback )
startSendRingback();
else
stopSendRingback();
// ... (elided in this excerpt)
// apiFormat_clockRate
// apiFormat_payloadSize
// set the API-side payload type (PCMU here) and packet size
audioStack->setApiFormat( rtpPayloadPCMU, request.rtpPacketSize*8 );
// set the network-side format to the same payload type and packet size
audioStack->setNetworkFormat( rtpPayloadPCMU, request.rtpPacketSize*8 );
deviceMutex.unlock();
reopenAudioHardware();
return 0;
}

c.如何接收或者發送RTP/RTCP數據包:
我們在前面已經看到了在SoundCardDevice::processRTP()中調用了RtpReceiver::receive()以及RtpTransmitter::transmitRaw()的方法,來接收/發送RTP、RTCP數據流。
1> RTP數據流的接收,它不會直接刪除數據,但是會用替代的方式對inbuff數據做更新:
// Drain the network receive queue into the circular playout buffer (inBuff),
// re-ordering packets by sequence number / RTP timestamp and patching silence
// into timestamp gaps, then rebuild and return one packet of
// apiFormat_payloadSize bytes suitable for device playout.
// The caller owns the returned packet.
RtpPacket* RtpReceiver::receive ()
{
RtpPacket* p = NULL;
int len = 0;
int len1 = 0;
int silencePatched = 0;
bool faking = 0;


// empty network que
NtpTime arrival (0, 0);
while (1) // network empty or time to play return packet
{ // pull the next packet from the network device's receive queue
p = getPacket();
if (p == NULL) break;

// only play packets for valid sources
if (probation < 0)
{
cpLog(LOG_ERR, "****Packet from invalid source");
delete p;
p = NULL;
continue;
}
// record the packet's arrival time (used for the jitter estimate below)
arrival = getNtpTime();
int packetTransit = 0;
int delay = 0;


rtp_ntohl(p);

// convert codec
if (p->getPayloadType() != apiFormat)
{
#ifndef __sparc
// The incoming packet is not in the session's API format (assumed PCMU
// in this walkthrough), so transcode it. See
//   convertCodec(RtpPayloadType fromType, RtpPayloadType toType,
//                char* in_data, char* out_data, int len)
// which converts the payload data between formats; the sample-level
// conversion is done by
//   unsigned char linear2ulaw( int pcm_val );
//   int ulaw2linear( unsigned char u_val );
RtpPacket* oldp = p;
p = convertRtpPacketCodec (apiFormat, oldp);
// ... (elided in this excerpt)
#endif
}
// length of the valid payload
len = p->getPayloadUsage();
if (len <= 0 || len > 1012)
{
delete p;
p = NULL;
continue;
}

// clamp the payload length down to a whole number of network-format frames
if (len > networkFormat_payloadSize )
{
int lenold = len;
len = ( len / networkFormat_payloadSize ) * networkFormat_payloadSize;
p->setPayloadUsage( len );
network_pktSampleSize = (lenold / networkFormat_payloadSize) * network_pktSampleSize;
}

// ... (elided in this excerpt)
// Re-order the data in inBuff using the received packet's sequence number
// and RTP timestamp.
if (RtpSeqGreater(p->getSequence(), prevSeqRecv))
{
// The packet is ahead of everything received so far: append at the tail.
while (RtpSeqGreater(p->getSequence(), prevSeqRecv))
{
silencePatched = 0;
faking = 0;
// Patch silence (comfort noise) into the buffer for any timestamp gap
// directly in front of this packet.
while( RtpTimeGreater( p->getRtpTime() - network_pktSampleSize, prevPacketRtpTime ) && ((p->getSequence() - 1) == prevSeqRecv))
{
if( silenceCodec == 0 )// lazily resolve a silence pattern for this codec
{
cpLog( LOG_DEBUG_STACK, "Patching silence" );
if ((p->getPayloadType() >= rtpPayloadDynMin) &&
(p->getPayloadType() <= rtpPayloadDynMax) &&
(codecString[0] != '\0'))
{
silenceCodec = findSilenceCodecString(codecString, len);
}
else
{// look up the silence pattern by static payload type
silenceCodec = findSilenceCodec( p->getPayloadType(), len );
}
if( silenceCodec == 0 )
{
if( len > rtpCodecInfo[ numRtpCodecInfo - 1 ].length )
{
assert( 0 );
}
// fall back to the last table entry's silence pattern
silenceCodec = (char*)&rtpCodecInfo[ numRtpCodecInfo - 1 ].silence;
faking = 1;
}
}
assert( silenceCodec );

if ((inPos + len) < IN_BUFFER_SIZE)
{
memcpy (inBuff + inPos, silenceCodec, len);
inPos += len;
silencePatched++;
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - inPos;
memcpy (inBuff + inPos, silenceCodec, len1);
memcpy (inBuff, silenceCodec + len1, len - len1);
inPos = len - len1;
//printf("inPos S=%d\n", inPos);
silencePatched++;
}
prevPacketRtpTime += network_pktSampleSize;
}
if( prevPacketRtpTime != p->getRtpTime() - network_pktSampleSize)
{
prevPacketRtpTime = p->getRtpTime() - network_pktSampleSize;
}
// insert the packet that is now due for playout into inBuff


if ((inPos + len) < IN_BUFFER_SIZE)
{
memcpy (inBuff + inPos, p->getPayloadLoc(), len);
inPos += len;
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - inPos;
memcpy (inBuff + inPos, p->getPayloadLoc(), len1);
memcpy (inBuff, p->getPayloadLoc() + len1, len - len1);
inPos = len - len1;
}

// update the received-packet counter, handling sequence-number wraparound
RtpSeqNumber tSeq = prevSeqRecv;
prevSeqRecv++;
if(prevSeqRecv > RTP_SEQ_MOD)
{
prevSeqRecv = 0;
}
if (prevSeqRecv < tSeq)
{
cpLog(LOG_DEBUG_STACK, "Recv cycle");
assert(prevSeqRecv == 0);
recvCycles += RTP_SEQ_MOD;
}
}
prevPacketRtpTime = p->getRtpTime();
if (silencePatched > 0)
cpLog(LOG_DEBUG_STACK, "silencePatched = %d", silencePatched);
if (faking)
silenceCodec = 0;
if (p->getSequence() != prevSeqRecv)
{
cpLog(LOG_DEBUG_STACK, "Unequal packet:%d stack:%d",
prevSeqRecv, p->getSequence());
}
}
else
{
// Late (out-of-order) packet: write it back into the buffer slot it
// belongs to, counted backwards from the current write position.
RtpSeqNumber base_prevSeqRecv = prevSeqRecv;
int inSeqRecv = 1;
while (RtpSeqGreater(base_prevSeqRecv, p->getSequence()))
{
inSeqRecv++;
base_prevSeqRecv--;
}
int inPosTemp = inPos - inSeqRecv * len;
if (inPosTemp < 0) inPosTemp = IN_BUFFER_SIZE + inPosTemp;

if ((inPosTemp + len) < IN_BUFFER_SIZE)
{
memcpy (inBuff + inPosTemp, p->getPayloadLoc(), len);
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - inPosTemp;
memcpy (inBuff + inPosTemp, p->getPayloadLoc(), len1);
memcpy (inBuff, (p->getPayloadLoc()) + len1, len - len1);
}
}

// update packet received
packetReceived++;
payloadReceived += len;

// update jitter calculation
packetTransit = arrival - rtp2ntp(p->getRtpTime());
delay = packetTransit - transit;
transit = packetTransit;
if (delay < 0) delay = -delay;
jitter += delay - ((jitter + 8) >> 4);

// fractional
// s->jitterTime += (1./16.) * ((double)deley - s->jitterTime);
// integer
//jitterTime += delay - ((jitterTime+8) >> 4);


if (p)
{
delete p;
p = NULL;
}
}

int packetSize = apiFormat_payloadSize;
// ... (elided in this excerpt)

// Rebuild one RTP packet of apiFormat_payloadSize bytes from inBuff so it
// suits device playout. (If both sides set apiFormat_payloadSize equal to
// networkFormat_payloadSize this is a straight copy.)
assert (!p);
p = new RtpPacket (packetSize);
if ( (playPos + packetSize) < IN_BUFFER_SIZE)
{
memcpy (p->getPayloadLoc(), inBuff + playPos, packetSize);
playPos += packetSize;
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - playPos;
memcpy (p->getPayloadLoc(), inBuff + playPos, len1);
memcpy (p->getPayloadLoc() + len1, inBuff, packetSize - len1);
playPos = packetSize - len1;
}

// fill in the RTP header of the rebuilt packet
p->setSSRC (ssrc);
p->setPayloadType (apiFormat);
p->setPayloadUsage (packetSize);
p->setRtpTime (prevRtpTime + api_pktSampleSize);
p->setSequence (prevSeqPlay + 1);

if (probation > 0) probation --;
receiverError = recv_success;
prevRtpTime = p->getRtpTime();
prevNtpTime = getNtpTime();
gotime = rtp2ntp (p->getRtpTime() + api_pktSampleSize) + jitterTime;
// update the played-packet counter, handling wraparound
RtpSeqNumber sSeq = prevSeqPlay;
prevSeqPlay++;
if (prevSeqPlay < sSeq)
{
playCycles += RTP_SEQ_MOD;
}

return p;
}

2> RTP數據流的發送:
  RtpTransmitter::transmitRaw方法是RTP數據流的發送方法。發送沒有接收這麼複雜,不需要針對Buff中數據按照包序列號和NTP排序,最後再根據本地的包長度重組,只要寫入Outbuff中直接加上RTP頭就可以直接發送了。
// Send path for raw audio samples.  The samples are (optionally) transcoded
// from the API format to the network format, appended to the circular output
// buffer, and then sent out as RTP packets of networkFormat_payloadSize bytes.
// Returns the accumulated result of the per-packet transmit() calls.
int RtpTransmitter::transmitRaw (char* data, int len)
{
    int len1;

    // If the device (API) format differs from the network format, transcode
    // the samples first.  convertCodec() writes into a scratch buffer, which
    // must be freed once the data has been copied into outBuff — the original
    // code leaked this allocation on every transcoded call.
    char* convBuffer = 0;
    if( apiFormat != networkFormat)
    {
        // NOTE(review): 1012 matches the maximum payload length accepted on
        // the receive path; confirm convertCodec() can never emit more.
        convBuffer = new char[1012];
        len = convertCodec(apiFormat, networkFormat, data, convBuffer, len);
        data = convBuffer;
    }

    // Append the (possibly converted) samples to the circular output buffer.
    if( (outPos + len) < OUT_BUFFER_SIZE)
    {
        memcpy (outBuff + outPos, data, len);
        outPos += len;
    }
    else
    {
        // circular memory copy: wrap around the end of the buffer
        len1 = OUT_BUFFER_SIZE - outPos;
        memcpy (outBuff + outPos, data, len1);
        memcpy (outBuff, data + len1, len - len1);
        outPos = len - len1;
    }

    // The conversion scratch buffer is no longer referenced past this point.
    delete [] convBuffer;

    // check if enough data is buffered to send out whole packets
    int packetSize = networkFormat_payloadSize;
    int result = 0;

    // Build one reusable RTP packet and fill in the fixed header fields.
    RtpPacket* p = new RtpPacket (networkFormat_payloadSize);
    assert (p);
    p->setSSRC (ssrc);
    p->setPayloadType (networkFormat);
    p->setPayloadUsage (packetSize);

    // Drain outBuff one packetSize chunk at a time; recPos advances by one
    // packet for every RTP packet transmitted.
    while ( ((outPos + OUT_BUFFER_SIZE - recPos) % OUT_BUFFER_SIZE) >= packetSize )
    {
        if( (recPos + packetSize) < OUT_BUFFER_SIZE)
        {
            memcpy (p->getPayloadLoc(), outBuff + recPos, packetSize);
            recPos += packetSize;
        }
        else
        {
            // circular memory copy
            len1 = OUT_BUFFER_SIZE - recPos;
            memcpy (p->getPayloadLoc(), outBuff + recPos, len1);
            memcpy (p->getPayloadLoc() + len1, outBuff, packetSize - len1);
            recPos = packetSize - len1;
        }
        // transmit this RTP packet
        result += transmit(p);
    }
    delete p;
    return result;
}

  3>上面說完了RTP包的發送和接收,現在該說說RTCP包的發送和接收了。我們知道RTCP包的目的在於向參與會話者發送有關傳輸質量的反饋消息,實現多媒體同步的功能,不過問題在於RTCP包的數量隨着參與者的數量增加而增加,所以一般說來點對點的話,沒有必要使用RTCP控制;另外隨着RSVP的普遍應用,Qos的控制機制愈加完善,也許沒有必要用這麼低級的Qos控制方式了。
我們可以看到在SoundCardDevice::ProcessRTP中調用了RTCP的發送和接收方法::

// Drive the RTCP side of the session: periodically emit a report on the
// transmit path and drain any reports pending on the receive path.
void RtpSession::processRTCP ()
{
    // Transmit side: only send when the RTCP reporting interval has elapsed
    // (checkIntervalRTCP enforces the fixed spacing between reports).
    if (rtcpTran && checkIntervalRTCP())
    {
        transmitRTCP();
    }

    // Receive side: process whatever RTCP packets have arrived.
    if (rtcpRecv)
    {
        receiveRTCP();
    }
}
定期發送SR報告(發送者報告):
// Build and send one RTCP compound packet: an SR (or RR) report plus, on the
// sending side only, an SDES source-description item.
int RtpSession::transmitRTCP ()
{
// ... (elided in this excerpt)
RtcpPacket* p = new RtcpPacket();

// load with report packet
rtcpTran->addSR(p);
// add the source-description item; only the sending side emits SDES here
if (tran) rtcpTran->addSDES(p);
// hand the compound packet to the UDP stack for transmission
int ret = rtcpTran->transmit(p);

if (p) delete p;
return ret;
}
如何構造一個發送者報告和接收者報告:
// Append a sender report (SR) — or a receiver report (RR) when we are not
// transmitting — to the RTCP packet `p`, including one report block per known
// remote transmitter.  `npadSize` > 0 sets the padding bit.  Returns the
// number of bytes appended (always a multiple of 4).
int RtcpTransmitter::addSR (RtcpPacket* p, int npadSize)
{
// build the RTCP common header
RtcpHeader* header = reinterpret_cast < RtcpHeader* > (p->freeData());
int usage = p->allocData (sizeof(RtcpHeader));
// fill in the header fields: version / padding / report count / type (SR or RR)
header->version = RTP_VERSION;
header->padding = (npadSize > 0) ? 1 : 0;
header->count = 0;
header->type = (tran) ? rtcpTypeSR : rtcpTypeRR;
// current wallclock time
NtpTime nowNtp = getNtpTime();
// sender-info block — only present in an SR, i.e. when we transmit
if (tran)
{
RtcpSender* senderInfo = reinterpret_cast < RtcpSender* > (p->freeData());
usage += p->allocData (sizeof(RtcpSender));
int diffNtp = 0;
if (nowNtp > tran->seedNtpTime)
diffNtp = nowNtp - tran->seedNtpTime;
else
if (tran->seedNtpTime > nowNtp)
diffNtp = tran->seedNtpTime - nowNtp;
// convert the elapsed NTP time into RTP timestamp units
RtpTime diffRtp = (diffNtp * tran->networkFormat_clockRate) / 1000;
senderInfo->ssrc = htonl(tran->ssrc);// sender's SSRC
senderInfo->ntpTimeSec = htonl(nowNtp.getSeconds());
senderInfo->ntpTimeFrac = htonl(nowNtp.getFractional());// NTP timestamp
senderInfo->rtpTime = htonl(tran->seedRtpTime + diffRtp);// RTP timestamp
senderInfo->packetCount = htonl(tran->packetSent);// packets sent
senderInfo->octetCount = htonl(tran->payloadSent);// payload octets sent
}
// ... (elided in this excerpt)

// report blocks
if ((rtcpRecv) && (rtcpRecv->getTranInfoCount() > 0))
{
RtpTranInfo* tranInfo = NULL;
RtpReceiver* recvInfoSpec = NULL;
RtcpReport* reportBlock = NULL;
for (int i = 0; i < rtcpRecv->getTranInfoCount(); i++)
{
tranInfo = rtcpRecv->getTranInfoList(i);
recvInfoSpec = tranInfo->recv;
// ... (elided in this excerpt)
//cpLog (LOG_DEBUG_STACK, "RTCP: Report block for src %d",
// recvInfoSpec->ssrc);
reportBlock = reinterpret_cast < RtcpReport* > (p->freeData());
usage += p->allocData (sizeof(RtcpReport));

reportBlock->ssrc = htonl(recvInfoSpec->ssrc);
reportBlock->fracLost = calcLostFrac(tranInfo);
// Cumulative packets lost per RFC 1889 A.3 (expected minus actually
// received over the interval), stored as a 24-bit big-endian field —
// hence the byte-by-byte placement below.
u_int32_t lost = (calcLostCount(tranInfo)) & 0xffffff;
reportBlock->cumLost[2] = lost & 0xff;
reportBlock->cumLost[1] = (lost & 0xff00) >> 8;
reportBlock->cumLost[0] = (lost & 0xff0000) >> 16;
// sequence-number cycle count
reportBlock->recvCycles = htons(recvInfoSpec->recvCycles);
// extended highest sequence number received
reportBlock->lastSeqRecv = htons(recvInfoSpec->prevSeqRecv);
// interarrival jitter
reportBlock->jitter = htonl(recvInfoSpec->jitter >> 4);
// last SR timestamp (LSR)
reportBlock->lastSRTimeStamp = htonl(tranInfo->lastSRTimestamp);
// delay since last SR (DLSR)
if (tranInfo->lastSRTimestamp == 0)
reportBlock->lastSRDelay = 0;
else
{
NtpTime thenNtp = tranInfo->recvLastSRTimestamp;
reportBlock->lastSRDelay = 0;
if (nowNtp > thenNtp)
reportBlock->lastSRDelay = htonl(nowNtp - thenNtp);
else
reportBlock->lastSRDelay = 0;
}
// next known transmitter
header->count++;
}
}

// ... (elided in this excerpt)
assert (usage % 4 == 0);
// total RTCP packet length, in 32-bit words minus one
header->length = htons((usage / 4) - 1);

return usage;
}
如何構造一個源描述相:addSDES

定期接收RTCP包的程序:
// Read one RTCP compound packet from the UDP channel and process it.
// Returns 1 when a BYE was handled (per readRTCP), otherwise the value
// `ret` was initialized to in the elided portion.
int RtpSession::receiveRTCP ()
{
// ... (elided in this excerpt)
// Read an RTCP packet via getPacket() — the same mechanism as the RTP
// packet read path.
RtcpPacket* p = rtcpRecv->getPacket();
// ... (elided in this excerpt)
if (rtcpRecv->readRTCP(p) == 1)
{
ret = 1;
}

if (p) delete p;
return ret;
}
我們下面來看一下,每一種RTCP包的處理過程:
// Walk every sub-packet of a compound RTCP packet and dispatch it to the
// matching handler.  Returns 1 when a BYE sub-packet was processed
// successfully, otherwise 0.
int RtcpReceiver::readRTCP (RtcpPacket* p)
{
    // cursor/limit bracket the raw bytes of the compound packet
    char* cursor = reinterpret_cast < char* > (p->getPacketData());
    char* const limit = cursor + p->getTotalUsage();
    int sawBye = 0;

    while (cursor < limit)
    {
        RtcpHeader* hdr = reinterpret_cast < RtcpHeader* > (cursor);
        switch (hdr->type)
        {
            case (rtcpTypeSR):
            case (rtcpTypeRR):
                // sender / receiver report
                readSR (hdr);
                break;
            case (rtcpTypeSDES):
                // source description
                readSDES (hdr);
                break;
            case (rtcpTypeBYE):
                // goodbye — flag a successful parse for the caller
                if ( readBYE (hdr) == 0)
                {
                    sawBye = 1;
                }
                break;
            case (rtcpTypeAPP):
                // application-defined
                readAPP (hdr);
                break;
            default:
                break;
        }
        // The header length field counts 32-bit words minus one.
        cursor += (ntohs(hdr->length) + 1) * sizeof(u_int32_t);
    }
    return sawBye;
}
我們以處理SR/RR分組爲例子,看一下如何處理RTCP分組消息的:
// Process one SR or RR sub-packet: record the sender info (SR only), then
// update delay statistics from each contained report block.
void RtcpReceiver::readSR (RtcpHeader* head)
{
char* middle = NULL;

NtpTime nowNtp = getNtpTime();
if (head->type == rtcpTypeSR)
{
// SR: a sender-info block follows the common header.
RtcpSender* senderBlock = reinterpret_cast < RtcpSender* >
((char*)head + sizeof(RtcpHeader));
RtpTranInfo* s = findTranInfo(ntohl(senderBlock->ssrc));
// remember the middle 32 bits of the NTP timestamp (the LSR value)
s->lastSRTimestamp = (ntohl(senderBlock->ntpTimeSec) << 16 |
ntohl(senderBlock->ntpTimeFrac) >> 16);
s->recvLastSRTimestamp = nowNtp;
packetReceived++;// one more RTCP packet received

NtpTime thenNtp ( ntohl(senderBlock->ntpTimeSec),
ntohl(senderBlock->ntpTimeFrac) );
// The two statistics below can be read directly by the application layer
// to monitor how the RTP stream is being transported.
accumOneWayDelay += (nowNtp - thenNtp);// total one-way delay accumulated over the interval
avgOneWayDelay = accumOneWayDelay / packetReceived;// average one-way delay
middle = (char*)head + sizeof(RtcpHeader) + sizeof(RtcpSender);
}
else
{
// RR: only the reporter's SSRC follows the common header.
middle = (char*)head + sizeof(RtcpHeader);

RtpSrc* sender = reinterpret_cast < RtpSrc* > (middle);
RtpSrc ssrc;

ssrc = ntohl(*sender);
middle += sizeof(RtpSrc);

packetReceived++;
}
RtcpReport* block = reinterpret_cast < RtcpReport* > (middle);
for (int i = head->count; i > 0; i--)
{
// The two statistics below can be read directly by the application layer
// to monitor reception of the RTP stream.
NtpTime thenNtp (ntohl(block->lastSRTimeStamp) >> 16,
ntohl(block->lastSRTimeStamp) << 16 );

NtpTime nowNtp1 (nowNtp.getSeconds() & 0x0000FFFF,
nowNtp.getFractional() & 0xFFFF0000);
accumRoundTripDelay += ((nowNtp1 - thenNtp)
- ntohl(block->lastSRDelay)); // total round-trip delay accumulated over the interval
avgRoundTripDelay = accumRoundTripDelay / packetReceived;// average round-trip delay over the interval
++block;
}
}

3.2.8.3 ACK消息的處理過程OpAck:

1.3 OpAck,這個操作主要是在被叫收到ACK消息後的處理過程,我們在這裏先期做介紹。
// Callee-side handling of a received ACK: once the ACK arrives, extract the
// peer's SDP from UaCallInfo and open the RTP/RTCP audio channel.
const Sptr < State >
OpAck::process( const Sptr < SipProxyEvent > event )
{
// ... (elided in this excerpt)
if ( sipMsg->getType() != SIP_ACK )
{
return 0;
}

Sptr < UaCallInfo > call;
call.dynamicCast( event->getCallInfo() );
assert( call != 0 );
// ... (elided in this excerpt)
// Pull the remote SDP out of UaCallInfo and open the audio path; see
// OpStartAudioDuplex::process for the mechanics of this step.
Sptr < SipSdp > remoteSdp = call->getRemoteSdp();
startAudio( localSdp, remoteSdp );
// ... (elided in this excerpt)
return 0;
}

3.2.8.4 OpConfTargetOk多方會議檢測:

  OpConfTargetOk,表示多方會議時候的檢測機制,這個機制在目前的設定中沒有使用,所以沒有必要介紹

  OpFwdDigit,在打開RTP/RTCP媒體通道以後,如果這個時候定義了通話轉接呼叫的方式,那麼按下0-9的按紐,那麼該方法通過以下的流程:

UaDevice::getDeviceQueue()->add( signal )-->ResGwDevice::processSessionMsg-->
CaseHardwareSignalType:...-->provideSignal-->provideDtmf-->OpAddDigit ::process--> UaDevice::instance()->getDigitCollector()->addDigit( digit )

  將所輸入的號碼存儲在DigitCollector中,如果通話繼續呼叫方式有效,那麼在操作隊列中增加addOperator( new OpSecondCall ),在這個新增加的操作符中重新開始向新的一端發送Invite消息(根據輸入的Digit形成被叫的Url)從而實現呼叫從一端轉接到另外一端的方式。

3.2.9呼叫等待:

  呼叫等待是SIP電話系統中一個比較有用的應用,在 RFC2543對這個應用也做了一些描述,主要的方法是向在通話過程中向等待方發送一個INVITE消息,消息中包括了一個將本地的SDP的C=選項的地址改變成"0.0.0.0"同時爲了和上一個INVITE消息區分Cseq項增加1,通過這樣實現抑制本地的媒體流。

我們看一下流程:


(點擊放大)

3.2.9.1 呼叫等待的詳細描述:(以Diagram.17爲例)

a. A,B兩個端點通過RTP/RTCP進行語音通訊;
b. B接收到了C的一個呼叫(Invite消息),這個時候B處於OpRing的狀態中,B向C發送Ring表示已經收到C的呼叫,並且讓C處於等待B摘機的狀態;
c.這個時候B進入OpStartCallWaiting狀態,在這個狀態裏,捕捉終端接收的DeviceEventFlash信號,也就是Flash信號,這樣把當前的A,B RTP會話陷入Hold狀態,也就是保持狀態,B把當前的會話的ID號放置入CallWaitingID的隊列中去進行等待;
d. B在OpStartCallWaiting中向A發送Reinvite消息,這個INVITE消息的SDP的C=選項的地址改變成"0.0.0.0",這個時候A在OpReinvite狀態中, B的通話暫時陷入停止,進入StateOnHold狀態中
c. B和C開始進行通訊;
d. C掛機發送Bye消息給B這個時候B進入OpEndCall狀態;
e. B在這個狀態的時候檢測到有呼叫等待,B進入到OpConvertCW中,並且把等待隊列中的CallID帶入myevent隊列中準備執行;如果這個時候捕捉到終端接收的DeviceEventFlash信號,OpRevert操作向A發送Reinvite消息,恢復和A的通訊;
f.A,B之間開始通訊;

3.2.9.2操作之間存在的競爭:

  從上面來看在操作中存在着一定的競爭:A,B之間通訊進入終止以後,是進入的StateOnHold狀態;同樣在B,C之間的通訊,在StateInCall狀態的時候,用戶也有可能發出DeviceEventFlash消息,迫使B重入StateOnHold狀態,而不是在對方發出Bye消息以後,這樣的結果就是B在StateOnHold狀態無法返回。修改的方法其實非常簡單,只要這樣就可以了:

  addOperator( new OpRevert )改成
  addEntryOperator(new OpRevert);
  爲什麼這麼改呢?把OpRevert放在不同的隊列中,這樣,從StateInCall狀態轉入StateOnHold的時候,就不是隻有一個FlashEvent的條件提供判斷了,狀態的變化需要通過State::Process來執行,這樣就增加了一個約束的條件,大家不明白的話可以細看一下State::Process(…)的代碼。

3.2.9.3 呼叫中所涉及模塊介紹:

  以下對呼叫等待所涉及到的一些模塊和方法的簡單介紹:
a.OpStartCallWaiting的應用:
OpStartCallWaiting主要是檢驗是否有進入呼叫等待的DeviceEventFlash信號,並且把當前的對話切換到等待狀態,而當前的等待切換爲當前對話,並且向等待的一方發送Re-Invite的hold消息。
// Hook-flash handling while a call is waiting: swap the active call with the
// waiting one, then put the previously-active party on hold by sending a
// re-INVITE whose SDP connection address is 0.0.0.0 and whose CSeq is one
// higher than the previous request.  Returns the StateOnhold state on
// success, 0 when the event does not apply.
OpStartCallWaiting::process( const Sptr < SipProxyEvent > event )
{
// Only react to the hook-flash key (e.g. C is calling B and B presses
// flash to switch to C).
if ( deviceEvent->type != DeviceEventFlash )
{
return 0;
}
// C's incoming call-id has already been placed on the call-waiting list.
Sptr < SipCallId > call2Id = UaDevice::instance()->getCallWaitingId();
if ( call2Id == 0 )
{
// no call on call waiting
return 0;
}


if ( UaConfiguration::instance()->getCallWaitingOn() )
{
// Wake the waiting call: post a DeviceEventFlash event carrying its
// call-id onto myeventQ; SipThread processes the queued event.
Sptr < UaCallContainer > calls;
calls.dynamicCast( event->getCallContainer() );
assert( calls != 0 );
Sptr < UaCallInfo > call2 = calls->findCall( *call2Id );

Sptr < Fifo < Sptr < SipProxyEvent > > > eventQ = deviceEvent->getFifo();
// NOTE(review): this local 'event' shadows the 'event' parameter for the
// rest of this scope — confirm that is intentional in the full source.
Sptr < UaDeviceEvent > event = new UaDeviceEvent( eventQ );
event->type = DeviceEventFlash;
event->callId = call2Id;
eventQ->add( event );

// Park the current A-B call on the call-waiting queue.
Sptr < SipCallId > callId = UaDevice::instance()->getCallId();
UaDevice::instance()->setCallId( 0 );
UaDevice::instance()->addCallWaitingId( callId );
}
// Prepare the re-INVITE to A with SDP c=0.0.0.0 and an incremented CSeq.
Sptr < UaCallInfo > call;
call.dynamicCast( event->getCallInfo() );
assert( call != 0 );

// Put current contact on hold
Sptr < InviteMsg > reInvite;

Sptr < Contact > contact = call->getContact();
assert( contact != 0 );

int status = contact->getStatus();
if ( status == 200 )
{
// As the caller side the re-INVITE is easy to build: mostly copy the
// previous exchange.
const StatusMsg& msg = contact->getStatusMsg();
// NOTE(review): '&msg != 0' takes the address of a reference, which can
// never be null in well-formed C++ — this guard is ineffective.
if ( &msg != 0 )
{
reInvite = new InviteMsg( msg );

//add SDP
Sptr < SipSdp > localSdp = call->getLocalSdp();
assert( localSdp != 0 );
SipSdp sipSdp = *localSdp;
reInvite->setContentData( &sipSdp );
}
// ... (elided in this excerpt)
}
else
{
// As the callee side the re-INVITE is more work: it must be rebuilt
// essentially from scratch.

const InviteMsg& msg = contact->getInviteMsg();
// NOTE(review): same ineffective address-of-reference null check as above.
if ( &msg != 0 )
{
string sipPort = UaConfiguration::instance()->getLocalSipPort();
reInvite = new InviteMsg( msg.getFrom().getUrl(),
atoi( sipPort.c_str() ) );
SipFrom from( msg.getTo().getUrl() );
reInvite->setFrom( from );

reInvite->setCallId( msg.getCallId() );

// Convert RecordRoute to reverse Route
int numRecordRoute = msg.getNumRecordRoute();
SipRecordRoute recordroute;
SipRoute route;

for ( int i = 0; i < numRecordRoute; i++ )
{
recordroute = msg.getRecordRoute( i );
route.setUrl( recordroute.getUrl() );
reInvite->setRoute( route ); // to beginning
}

int numContact = msg.getNumContact();
if ( numContact )
{
SipContact contact = msg.getContact( numContact - 1 );
route.setUrl( contact.getUrl() );
reInvite->setRoute( route ); // to beginning
}

}

}
assert( reInvite != 0 );

// Stamp our own Via header onto the re-INVITE.
SipVia sipVia;
sipVia.setprotoVersion( "2.0" );
sipVia.setHost( Data( theSystem.gethostAddress() ) );
sipVia.setPort( atoi( UaConfiguration::instance()->getLocalSipPort().c_str() ) );
reInvite->flushViaList();
reInvite->setVia( sipVia, 0 );

// Set Contact: header
Sptr< SipUrl > myUrl = new SipUrl;
myUrl->setUserValue( UaConfiguration::instance()->getUserName(), "phone" );
myUrl->setHost( Data( theSystem.gethostAddress() ) );
myUrl->setPort( atoi( UaConfiguration::instance()->getLocalSipPort().c_str() ) );
SipContact me;
me.setUrl( myUrl );
reInvite->setNumContact( 0 ); // Clear
reInvite->setContact( me );

//TODO Is it going to be a problem if the other side also use the next
//TODO CSeq at the same time?
unsigned int cseq = contact->getCSeqNum();
contact->setCSeqNum( ++cseq );
SipCSeq sipCSeq = reInvite->getCSeq();
sipCSeq.setCSeq( cseq );
reInvite->setCSeq( sipCSeq );

Sptr<SipSdp> sipSdp;
sipSdp.dynamicCast ( reInvite->getContentData( 0 ) );
assert ( sipSdp != 0 );
SdpSession sdpDesc = sipSdp->getSdpDescriptor();
// Mark the SDP connection address as 0.0.0.0 (hold).
sdpDesc.setHold();
sipSdp->setSdpDescriptor( sdpDesc );
// Send the re-INVITE to the A side.
deviceEvent->getSipStack()->sendAsync( *reInvite );
Sptr < UaStateMachine > stateMachine;
stateMachine.dynamicCast( event->getCallInfo()->getFeature() );
assert( stateMachine != 0 );
// Transition to StateOnhold.
return stateMachine->findState( "StateOnhold" );
}
b.OpReinvite:
OpReinvite在接收到由通訊的對端Invite消息以後,把消息內的 RemoteSDP放在本地的UaCallInfo中然後回送一個OK消息給對端;
c.OpEndCall:
OpEndCall在檢測到Bye消息發送以後,讓程序回送OK消息並且進入StateCallEnded狀態中;
d.OpRevert:
OpRevert檢測到再次有DeviceEventFlash消息的時候本地開始發送INVITE消息,把等待方`處於等待的呼叫喚起;
e.StateCallEnded狀態:
StateCallEnded同樣是檢測DeviceEventFlash的消息,檢測到以後把調用OpConvertCw的操作,把處於等待隊列裏的呼叫喚起。

(未完待續)

發佈了14 篇原創文章 · 獲贊 1 · 訪問量 6萬+
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章