
Implementing Your Own SIP Stack on Top of Vovida (Part 5)

Lu Zheng, 2003/08/07

3.2.8.2 Handling RTP/RTCP Packets:

  Earlier we described how ResGwDevice::processSessionMsg handles the individual commands hung on the device processing queue; let us now look at the concrete application-level handling:

a. Handling terminal messages issued by the user, and opening the device to send media packets.
void ResGwDevice::processSessionMsg( Sptr < SipProxyEvent > event )
{
    Sptr < UaHardwareEvent > msg;
    msg.dynamicCast( event );
    if( msg != 0 )
    {
        cpLog( LOG_DEBUG, "Got message type: %d", msg->type );
        switch( msg->type )
        {
            case HardwareSignalType: // This state exists for voicemail; it is
                                     // discussed in the Feature Server chapter.
            ……
            case HardwareAudioType:
                switch ((msg->signalOrRequest).request.type)
                {
                    case AudioStart:   // Open the sound device and establish the
                                       // RTP/RTCP session.
                        audioStart((msg->signalOrRequest).request);
                        break;
                    case AudioStop:    // Stop the sound device, tear down the
                                       // established session, and release the
                                       // resources the device holds.
                        audioStop();
                        break;
                    case AudioSuspend: // Suspend the device without releasing its
                                       // resources; the RTP session keeps running.
                        audioSuspend();
                        break;
                    case AudioResume:  // Restart the device.
                        audioResume((msg->signalOrRequest).request);
                        break;
                    default:
                        cpLog( LOG_ERR, "Unknown audio request: %d",
                               (msg->signalOrRequest).request.type );
                }
                break;
            … …
        }
    }
}
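For orientation, here is a minimal sketch (not verbatim Vovida source) of how the application side might queue such an AudioStart request for the device thread. The UaHardwareEvent constructor argument is assumed by analogy with the UaDeviceEvent( eventQ ) call shown in section 3.2.9.3, and the field values are made-up examples:

// A sketch only: build an AudioStart request and hand it to the device
// queue, from which ResGwDevice::processSessionMsg dispatches it to
// audioStart() as shown above.
Sptr < UaHardwareEvent > signal =
    new UaHardwareEvent( UaDevice::getDeviceQueue() );   // ctor argument assumed
signal->type = HardwareAudioType;
signal->signalOrRequest.request.type = AudioStart;
signal->signalOrRequest.request.localPort = 10000;       // example values
signal->signalOrRequest.request.remotePort = 10002;
strcpy( signal->signalOrRequest.request.remoteHost, "192.168.0.10" );
signal->signalOrRequest.request.rtpPacketSize = 20;      // 20 ms of PCMU
signal->signalOrRequest.request.sendRingback = false;
UaDevice::getDeviceQueue()->add( signal );               // same call as in 3.2.8.4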

b. Establishing the RTP session from the remote and local SDP (simplified):
int SoundCardDevice::audioStart( const HardwareAudioRequest& request )
{
    deviceMutex.lock();

    // create new audioStack for this audio session
    // 0 is rtpPayloadPCMU
    // last parameter, -1, disables jitter buffer
    if( audioStack == 0 )
    {
        int remoteRtcpPort = (request.remotePort > 0) ? request.remotePort + 1 : 0;
        int localRtcpPort = (request.localPort > 0) ? request.localPort + 1 : 0;
        cerr << "%%% Remote rtcp port : " << remoteRtcpPort << "\n";
        cerr << "%%% Local rtcp port : " << localRtcpPort << "\n\n";
        const char* remoteHost = 0;
        if ( request.remotePort != 0 )
            remoteHost = request.remoteHost;
        // Create the RTP session. The parameters are: the local/remote hosts,
        // the RTP and RTCP ports, and the API/network RTP payload types. This
        // sets up the send/receive machinery for RTP/RTCP packets, including
        // the receive playout buffer inBuff.
        audioStack = new RtpSession( remoteHost, request.remotePort,
                                     request.localPort, remoteRtcpPort,
                                     localRtcpPort, rtpPayloadPCMU,
                                     rtpPayloadPCMU, 0 );
    }
    else
    {
        … …
    }
    // Decide whether to start or stop sending ringback to the far end.
    if( request.sendRingback )
        startSendRingback();
    else
        stopSendRingback();
    … …
    // apiFormat_clockRate
    // apiFormat_payloadSize
    // Set the payload type of the RTP packets (here PCMU) and the packet size.
    audioStack->setApiFormat( rtpPayloadPCMU, request.rtpPacketSize*8 );
    // The packet size used when transmitting/receiving on the network; here it
    // is set to the same size and type as the API-side packets.
    audioStack->setNetworkFormat( rtpPayloadPCMU, request.rtpPacketSize*8 );
    deviceMutex.unlock();
    reopenAudioHardware();
    return 0;
}
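A note on the two set*Format calls: the multiplier in request.rtpPacketSize*8 suggests the packet size is specified in milliseconds, since PCMU at 8 kHz produces 8 one-byte samples per millisecond. Under that assumption (the article does not state the unit explicitly), the arithmetic is:

// Assumed interpretation, not confirmed by the article:
//     payload bytes per packet = rtpPacketSize (ms) * 8 samples/ms
// e.g. a 20 ms packet carries 20 * 8 = 160 bytes of PCMU, the usual
// VoIP default packetization.
int payloadBytes = request.rtpPacketSize * 8;   // 20 ms -> 160 bytes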

c. How RTP/RTCP packets are received and sent:
  We have already seen that SoundCardDevice::processRTP() calls the RtpSession::receive() and RtpSession::transmitRaw() methods to receive and send the RTP and RTCP streams.
1> Receiving the RTP stream. The receiver does not delete data outright; instead it updates the contents of inBuff by overwriting:
RtpPacket* RtpReceiver::receive ()
{
    RtpPacket* p = NULL;
    int len = 0;
    int len1 = 0;
    int silencePatched = 0;
    bool faking = 0;

    // empty network queue
    NtpTime arrival (0, 0);
    while (1) // network empty or time to play return packet
    {
        // Take the next packet out of the network device's receive queue.
        p = getPacket();
        if (p == NULL) break;

        // only play packets for valid sources
        if (probation < 0)
        {
            cpLog(LOG_ERR, "****Packet from invalid source");
            delete p;
            p = NULL;
            continue;
        }
        // Record the packet's arrival time.
        arrival = getNtpTime();
        int packetTransit = 0;
        int delay = 0;

        rtp_ntohl(p);

        // convert codec
        if (p->getPayloadType() != apiFormat)
        {
#ifndef __sparc
            // The received packet does not match the current API format (say,
            // PCMU), so convert it, e.g. linear mono 16-bit to PCMU 8 kHz. See
            // convertCodec(RtpPayloadType fromType, RtpPayloadType toType,
            //              char* in_data, char* out_data, int len),
            // whose main job is to convert the payload data between formats;
            // the parameters need no further explanation. Interested readers
            // can study the source of
            //     unsigned char linear2ulaw( int pcm_val );
            //     int ulaw2linear( unsigned char u_val );
            RtpPacket* oldp = p;
            p = convertRtpPacketCodec (apiFormat, oldp);
            … …
#endif
        }
        // Get the payload length.
        len = p->getPayloadUsage();
        if (len <= 0 || len > 1012)
        {
            delete p;
            p = NULL;
            continue;
        }

        // Trim the received RTP packet so its length stays within what the
        // network payload size allows.
        if (len > networkFormat_payloadSize )
        {
            int lenold = len;
            len = ( len / networkFormat_payloadSize ) * networkFormat_payloadSize;
            p->setPayloadUsage( len );
            network_pktSampleSize = (lenold / networkFormat_payloadSize) * network_pktSampleSize;
        }

        … …

… …
根据接收到RTP的分组序号和时间戳标志的数据对Inbuff里的数据包进行重排,
if (RtpSeqGreater(p->getSequence(), prevSeqRecv))
{
在这里是把包增加到数据的队列尾
while (RtpSeqGreater(p->getSequence(), prevSeqRecv))
{
silencePatched = 0;
faking = 0;
//下面程序部分是在收到的分组头部增加白燥声。
while( RtpTimeGreater( p->getRtpTime() - network_pktSampleSize, prevPacketRtpTime ) && ((p->getSequence() - 1) == prevSeqRecv))
{
if( silenceCodec == 0 )//
{
cpLog( LOG_DEBUG_STACK, "Patching silence" );
if ((p->getPayloadType() >= rtpPayloadDynMin) &&
(p->getPayloadType() <= rtpPayloadDynMax) &&
(codecString[0] != '\0'))
{
silenceCodec = findSilenceCodecString(codecString, len);
}
else
{//添加白噪音
silenceCodec = findSilenceCodec( p->getPayloadType(), len );
}
if( silenceCodec == 0 )
{
if( len > rtpCodecInfo[ numRtpCodecInfo - 1 ].length )
{
assert( 0 );
}
silenceCodec = (char*)&rtpCodecInfo[ numRtpCodecInfo - 1 ].silence;
faking = 1;
}
}
assert( silenceCodec );

if ((inPos + len) < IN_BUFFER_SIZE)
{
memcpy (inBuff + inPos, silenceCodec, len);
inPos += len;
silencePatched++;
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - inPos;
memcpy (inBuff + inPos, silenceCodec, len1);
memcpy (inBuff, silenceCodec + len1, len - len1);
inPos = len - len1;
//printf("inPos S=%d\n", inPos);
silencePatched++;
}
prevPacketRtpTime += network_pktSampleSize;
}
if( prevPacketRtpTime != p->getRtpTime() - network_pktSampleSize)
{
prevPacketRtpTime = p->getRtpTime() - network_pktSampleSize;
}
//在inbuff队列中插入已经待播放的分组,


if ((inPos + len) < IN_BUFFER_SIZE)
{
memcpy (inBuff + inPos, p->getPayloadLoc(), len);
inPos += len;
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - inPos;
memcpy (inBuff + inPos, p->getPayloadLoc(), len1);
memcpy (inBuff, p->getPayloadLoc() + len1, len - len1);
inPos = len - len1;
}

//更新受到包的计数器
RtpSeqNumber tSeq = prevSeqRecv;
prevSeqRecv++;
if(prevSeqRecv > RTP_SEQ_MOD)
{
prevSeqRecv = 0;
}
if (prevSeqRecv < tSeq)
{
cpLog(LOG_DEBUG_STACK, "Recv cycle");
assert(prevSeqRecv == 0);
recvCycles += RTP_SEQ_MOD;
}
}
prevPacketRtpTime = p->getRtpTime();
if (silencePatched > 0)
cpLog(LOG_DEBUG_STACK, "silencePatched = %d", silencePatched);
if (faking)
silenceCodec = 0;
if (p->getSequence() != prevSeqRecv)
{
cpLog(LOG_DEBUG_STACK, "Unequal packet:%d stack:%d",
prevSeqRecv, p->getSequence());
}
}
else
{
RtpSeqNumber base_prevSeqRecv = prevSeqRecv;
int inSeqRecv = 1;
while (RtpSeqGreater(base_prevSeqRecv, p->getSequence()))
{
inSeqRecv++;
base_prevSeqRecv--;
}
int inPosTemp = inPos - inSeqRecv * len;
if (inPosTemp < 0) inPosTemp = IN_BUFFER_SIZE + inPosTemp;

if ((inPosTemp + len) < IN_BUFFER_SIZE)
{
memcpy (inBuff + inPosTemp, p->getPayloadLoc(), len);
}
else
{
// circular memory copy
len1 = IN_BUFFER_SIZE - inPosTemp;
memcpy (inBuff + inPosTemp, p->getPayloadLoc(), len1);
memcpy (inBuff, (p->getPayloadLoc()) + len1, len - len1);
}
}

// update packet received
packetReceived++;
payloadReceived += len;

// update jitter calculation
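        // (This is the interarrival-jitter estimator of RFC 1889, Appendix A.8:
        //  J := J + (|D| - J)/16, computed in fixed point. The "jitter" member
        //  holds 16 times the estimate, which is why addSR() below reports
        //  recvInfoSpec->jitter >> 4.)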
        packetTransit = arrival - rtp2ntp(p->getRtpTime());
        delay = packetTransit - transit;
        transit = packetTransit;
        if (delay < 0) delay = -delay;
        jitter += delay - ((jitter + 8) >> 4);

        // fractional
        // s->jitterTime += (1./16.) * ((double)delay - s->jitterTime);
        // integer
        //jitterTime += delay - ((jitterTime+8) >> 4);

        if (p)
        {
            delete p;
            p = NULL;
        }
    }

    int packetSize = apiFormat_payloadSize;
    … …

    // Split the buffered data into chunks of apiFormat_payloadSize and build a
    // fresh RTP packet that suits the playback device. Of course, the two sides
    // may also simply set apiFormat_payloadSize and networkFormat_payloadSize
    // to the same value.
    assert (!p);
    p = new RtpPacket (packetSize);
    if ( (playPos + packetSize) < IN_BUFFER_SIZE)
    {
        memcpy (p->getPayloadLoc(), inBuff + playPos, packetSize);
        playPos += packetSize;
    }
    else
    {
        len1 = IN_BUFFER_SIZE - playPos;
        memcpy (p->getPayloadLoc(), inBuff + playPos, len1);
        memcpy (p->getPayloadLoc() + len1, inBuff, packetSize - len1);
        playPos = packetSize - len1;
    }

    // Build the RTP packet header.
    p->setSSRC (ssrc);
    p->setPayloadType (apiFormat);
    p->setPayloadUsage (packetSize);
    p->setRtpTime (prevRtpTime + api_pktSampleSize);
    p->setSequence (prevSeqPlay + 1);

    if (probation > 0) probation --;
    receiverError = recv_success;
    prevRtpTime = p->getRtpTime();
    prevNtpTime = getNtpTime();
    gotime = rtp2ntp (p->getRtpTime() + api_pktSampleSize) + jitterTime;
    // Update the counter of packets handed to playout.
    RtpSeqNumber sSeq = prevSeqPlay;
    prevSeqPlay++;
    if (prevSeqPlay < sSeq)
    {
        playCycles += RTP_SEQ_MOD;
    }

    return p;
}
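The same wrap-around copy appears several times above, for both inBuff and (below) outBuff. As a minimal sketch, not part of the Vovida sources, the pattern can be captured in one helper:

// Illustrative helper (memcpy is from <cstring>); this name does not exist
// in Vovida. bufSize is the total ring size (e.g. IN_BUFFER_SIZE) and pos is
// the write cursor, updated to just past the copied data.
static void circularWrite( char* buf, int bufSize, int& pos,
                           const char* src, int len )
{
    if ( (pos + len) < bufSize )
    {
        memcpy( buf + pos, src, len );
        pos += len;
    }
    else
    {
        int first = bufSize - pos;        // bytes that fit before the wrap
        memcpy( buf + pos, src, first );
        memcpy( buf, src + first, len - first );
        pos = len - first;                // cursor continues from the start
    }
}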

2> Sending the RTP stream:
  The RtpTransmitter::transmitRaw method sends the RTP stream. Sending is much simpler than receiving: there is no need to reorder the buffered data by sequence number and NTP timestamp, nor to re-chunk it afterwards to the local packet length. The data only has to be written into outBuff, given an RTP header, and sent.
int RtpTransmitter::transmitRaw (char* data, int len)
{
    int len1;
    // If the format the media device delivers does not match the network
    // transmission format, call the conversion routine to convert from the
    // local playback format to the network format. convertCodec() was
    // introduced earlier.
    if( apiFormat != networkFormat)
    {
        char* buffer = new char[1012];
        len = convertCodec(apiFormat, networkFormat, data, buffer, len);
        data = buffer;
    }
    // Append the outgoing bytes to outBuff, ready to be sent.
    if( (outPos + len) < OUT_BUFFER_SIZE)
    {
        memcpy (outBuff + outPos, data, len);
        outPos += len;
    }
    else
    {
        // circular memory copy
        len1 = OUT_BUFFER_SIZE - outPos;
        memcpy (outBuff + outPos, data, len1);
        memcpy (outBuff, data + len1, len - len1);
        outPos = len - len1;
    }

    // check if enough data to send out packet
    int packetSize = networkFormat_payloadSize;
    int result = 0;
    // Create a new RTP packet...
    RtpPacket* p = new RtpPacket (networkFormat_payloadSize);
    assert (p);
    // ...and fill in its header.
    p->setSSRC (ssrc);
    p->setPayloadType (networkFormat);
    p->setPayloadUsage (packetSize);

    // Fill the new packet's payload from outBuff; after each packet is
    // filled, the read cursor recPos advances by one packet.
    while ( ((outPos + OUT_BUFFER_SIZE - recPos) % OUT_BUFFER_SIZE) >= packetSize )
    {
        if( (recPos + packetSize) < OUT_BUFFER_SIZE)
        {
            memcpy (p->getPayloadLoc(), outBuff + recPos, packetSize);
            recPos += packetSize;
        }
        else
        {
            len1 = OUT_BUFFER_SIZE - recPos;
            memcpy (p->getPayloadLoc(), outBuff + recPos, len1);
            memcpy (p->getPayloadLoc() + len1, outBuff, packetSize - len1);
            recPos = packetSize - len1;
        }
        // Transmit the RTP packet.
        result += transmit(p);
    }
    if( p) delete p;
    p = NULL;
    return result;
}
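As a usage sketch (the callback name and wiring are assumptions; the article only states that SoundCardDevice::processRTP drives this path): a sound device delivering one packet interval of PCMU at a time pushes it straight into the transmitter, which buffers, packetizes, and sends:

// Hypothetical glue, for illustration only: deliver each 20 ms block of
// PCMU samples read from the sound card to the RTP transmitter.
void onAudioBlock( RtpSession* audioStack, char* samples )
{
    // 160 bytes = 20 ms of PCMU at 8 kHz; transmitRaw() appends the data to
    // outBuff and sends as many full networkFormat_payloadSize packets as
    // are available.
    audioStack->transmitRaw( samples, 160 );
}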

  3> Having covered sending and receiving RTP packets, we can now discuss sending and receiving RTCP packets. The purpose of RTCP is to send the session participants feedback on reception quality and to support media synchronization. The catch is that the amount of RTCP traffic grows with the number of participants, so for a point-to-point call there is generally no need for RTCP control; moreover, as RSVP comes into common use and QoS control mechanisms grow more complete, such a low-level QoS mechanism may no longer be necessary.
We can see that SoundCardDevice::processRTP calls the RTCP send and receive methods:

void RtpSession::processRTCP ()
{
    if (rtcpTran)
    {
        // checkIntervalRTCP ensures RTCP packets are only sent at fixed intervals.
        if (checkIntervalRTCP()) transmitRTCP();
    }
    if (rtcpRecv)
    {
        receiveRTCP();
    }
    return ;
}
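What checkIntervalRTCP guards against is sending reports too often. A minimal sketch of the idea (illustrative only; Vovida's actual implementation follows the interval rules of RFC 1889, and the NtpTime arithmetic here assumes millisecond differences, as in addSR() below):

// Illustrative only: allow an RTCP transmission at most every 5 seconds,
// the minimum interval recommended by RFC 1889.
bool checkIntervalSketch( NtpTime& nextSendTime )
{
    NtpTime now = getNtpTime();
    if ( nextSendTime > now )
        return false;              // interval not yet elapsed
    nextSendTime = now + 5000;     // assumes NtpTime +/- int works in ms
    return true;
}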
Sending SR (sender report) packets periodically:
int RtpSession::transmitRTCP ()
{
    … …
    RtcpPacket* p = new RtcpPacket();

    // load with report packet
    rtcpTran->addSR(p);
    // Add a source description (SDES) item; only the sender adds one here,
    // a pure receiver does not.
    if (tran) rtcpTran->addSDES(p);
    // Send the RTCP packet via UdpStack::transmitTo.
    int ret = rtcpTran->transmit(p);

    if (p) delete p;
    return ret;
}
How a sender report or receiver report is constructed:
int RtcpTransmitter::addSR (RtcpPacket* p, int npadSize)
{
    // Create the RTCP packet header.
    RtcpHeader* header = reinterpret_cast < RtcpHeader* > (p->freeData());
    int usage = p->allocData (sizeof(RtcpHeader));
    // Fill in the header fields: version / padding / report count / packet type (SR or RR).
    header->version = RTP_VERSION;
    header->padding = (npadSize > 0) ? 1 : 0;
    header->count = 0;
    header->type = (tran) ? rtcpTypeSR : rtcpTypeRR;
    // Get the current timestamp.
    NtpTime nowNtp = getNtpTime();
    // Build the sender-info block of an SR packet.
    if (tran)
    {
        RtcpSender* senderInfo = reinterpret_cast < RtcpSender* > (p->freeData());
        usage += p->allocData (sizeof(RtcpSender));
        int diffNtp = 0;
        if (nowNtp > tran->seedNtpTime)
            diffNtp = nowNtp - tran->seedNtpTime;
        else if (tran->seedNtpTime > nowNtp)
            diffNtp = tran->seedNtpTime - nowNtp;
        RtpTime diffRtp = (diffNtp * tran->networkFormat_clockRate) / 1000;
        senderInfo->ssrc = htonl(tran->ssrc);                    // the sender's SSRC
        senderInfo->ntpTimeSec = htonl(nowNtp.getSeconds());
        senderInfo->ntpTimeFrac = htonl(nowNtp.getFractional()); // NTP timestamp
        senderInfo->rtpTime = htonl(tran->seedRtpTime + diffRtp); // RTP timestamp
        senderInfo->packetCount = htonl(tran->packetSent);       // sender's packet count
        senderInfo->octetCount = htonl(tran->payloadSent);       // sender's octet count
    }
    … …

    // report blocks
    if ((rtcpRecv) && (rtcpRecv->getTranInfoCount() > 0))
    {
        RtpTranInfo* tranInfo = NULL;
        RtpReceiver* recvInfoSpec = NULL;
        RtcpReport* reportBlock = NULL;
        for (int i = 0; i < rtcpRecv->getTranInfoCount(); i++)
        {
            tranInfo = rtcpRecv->getTranInfoList(i);
            recvInfoSpec = tranInfo->recv;
            … …
            //cpLog (LOG_DEBUG_STACK, "RTCP: Report block for src %d",
            //       recvInfoSpec->ssrc);
            reportBlock = reinterpret_cast < RtcpReport* > (p->freeData());
            usage += p->allocData (sizeof(RtcpReport));

            reportBlock->ssrc = htonl(recvInfoSpec->ssrc);
            reportBlock->fracLost = calcLostFrac(tranInfo);
            // Compute the cumulative number of packets lost per RFC 1889
            // Appendix A.3, by comparing the packets actually received with
            // the number expected over the interval, then lay the 24-bit
            // value out as the RTCP header requires.
            u_int32_t lost = (calcLostCount(tranInfo)) & 0xffffff;
            reportBlock->cumLost[2] = lost & 0xff;
            reportBlock->cumLost[1] = (lost & 0xff00) >> 8;
            reportBlock->cumLost[0] = (lost & 0xff0000) >> 16;
            // Sequence-number cycle count.
            reportBlock->recvCycles = htons(recvInfoSpec->recvCycles);
            // Extended highest sequence number received.
            reportBlock->lastSeqRecv = htons(recvInfoSpec->prevSeqRecv);
            // Interarrival jitter.
            reportBlock->jitter = htonl(recvInfoSpec->jitter >> 4);
            // Last SR timestamp (LSR).
            reportBlock->lastSRTimeStamp = htonl(tranInfo->lastSRTimestamp);
            // Delay since the last SR arrived (DLSR).
            if (tranInfo->lastSRTimestamp == 0)
                reportBlock->lastSRDelay = 0;
            else
            {
                NtpTime thenNtp = tranInfo->recvLastSRTimestamp;
                reportBlock->lastSRDelay = 0;
                if (nowNtp > thenNtp)
                    reportBlock->lastSRDelay = htonl(nowNtp - thenNtp);
                else
                    reportBlock->lastSRDelay = 0;
            }
            // next known transmitter
            header->count++;
        }
    }

    … …
    assert (usage % 4 == 0);
    // Set the length of the whole RTCP packet.
    header->length = htons((usage / 4) - 1);

    return usage;
}
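The fraction-lost field above comes from calcLostFrac. A minimal sketch of the RFC 1889 Appendix A.3 computation (illustrative; Vovida's own calcLostFrac differs in bookkeeping details):

// Illustrative RFC 1889 A.3 computation, not Vovida's verbatim code:
// fraction lost = packets lost this interval / packets expected this
// interval, as an 8-bit fixed-point value with the binary point at the
// left edge (hence the << 8).
u_int8_t calcLostFracSketch( u_int32_t expected, u_int32_t received,
                             u_int32_t& expectedPrior, u_int32_t& receivedPrior )
{
    u_int32_t expectedInterval = expected - expectedPrior;
    u_int32_t receivedInterval = received - receivedPrior;
    expectedPrior = expected;
    receivedPrior = received;
    int lostInterval = (int)(expectedInterval - receivedInterval);
    if ( expectedInterval == 0 || lostInterval <= 0 ) return 0;
    return (u_int8_t)((lostInterval << 8) / expectedInterval);
}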
How a source description item is constructed: addSDES

The routine that periodically receives RTCP packets:
int RtpSession::receiveRTCP ()
{
    … …
    // Read an RTCP packet from the UDP channel via getPacket(). Note: this
    // method works essentially the same way as reading an RTP packet.
    RtcpPacket* p = rtcpRecv->getPacket();
    … …
    if (rtcpRecv->readRTCP(p) == 1)
    {
        ret = 1;
    }

    if (p) delete p;
    return ret;
}
Let us now look at how each kind of RTCP packet is handled:
int RtcpReceiver::readRTCP (RtcpPacket* p)
{
    // begin and end delimit the received compound RTCP packet.
    char* begin = reinterpret_cast < char* > (p->getPacketData());
    char* end = reinterpret_cast < char* > (begin + p->getTotalUsage());
    RtcpHeader* middle = NULL;
    int ret = 0;
    // Walk the whole buffer and handle each RTCP part.
    while (begin < end)
    {
        middle = reinterpret_cast < RtcpHeader* > (begin);
        switch (middle->type)
        {
            case (rtcpTypeSR):
            case (rtcpTypeRR):
                readSR (middle);    // handle SR/RR parts
                break;
            case (rtcpTypeSDES):
                readSDES (middle);  // handle SDES parts
                break;
            case (rtcpTypeBYE):
                if ( readBYE (middle) == 0)  // handle BYE parts
                {
                    ret = 1;
                }
                break;
            case (rtcpTypeAPP):
                readAPP (middle);   // handle APP parts
                break;
            default:
                break;
        }
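        // (The RTCP "length" field counts the packet's 32-bit words minus one,
        //  per RFC 1889, so advancing by (length + 1) words lands exactly on
        //  the start of the next part of the compound packet.)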
        begin += (ntohs(middle->length) + 1) * sizeof(u_int32_t);
    }
    return ret;
}
Taking the handling of SR/RR packets as an example, here is how an RTCP message is processed:
void RtcpReceiver::readSR (RtcpHeader* head)
{
    char* middle = NULL;

    NtpTime nowNtp = getNtpTime();
    if (head->type == rtcpTypeSR)
    {
        RtcpSender* senderBlock = reinterpret_cast < RtcpSender* >
                                  ((char*)head + sizeof(RtcpHeader));
        RtpTranInfo* s = findTranInfo(ntohl(senderBlock->ssrc));
        s->lastSRTimestamp = (ntohl(senderBlock->ntpTimeSec) << 16 |
                              ntohl(senderBlock->ntpTimeFrac) >> 16);
        s->recvLastSRTimestamp = nowNtp;
        packetReceived++;  // bump the received-packet counter

        NtpTime thenNtp ( ntohl(senderBlock->ntpTimeSec),
                          ntohl(senderBlock->ntpTimeFrac) );
        // The two values below can be queried directly by the application
        // layer to judge the current condition of the RTP stream.
        accumOneWayDelay += (nowNtp - thenNtp);  // total arrival delay of RTP packets in this interval
        avgOneWayDelay = accumOneWayDelay / packetReceived;  // average delay
        middle = (char*)head + sizeof(RtcpHeader) + sizeof(RtcpSender);
    }
    else
    {
        middle = (char*)head + sizeof(RtcpHeader);

        RtpSrc* sender = reinterpret_cast < RtpSrc* > (middle);
        RtpSrc ssrc;

        ssrc = ntohl(*sender);
        middle += sizeof(RtpSrc);

        packetReceived++;
    }
    RtcpReport* block = reinterpret_cast < RtcpReport* > (middle);
    for (int i = head->count; i > 0; i--)
    {
        // The two values below can likewise be queried by the application
        // layer to judge the reception condition of the RTP stream.
        NtpTime thenNtp (ntohl(block->lastSRTimeStamp) >> 16,
                         ntohl(block->lastSRTimeStamp) << 16 );

        NtpTime nowNtp1 (nowNtp.getSeconds() & 0x0000FFFF,
                         nowNtp.getFractional() & 0xFFFF0000);
        accumRoundTripDelay += ((nowNtp1 - thenNtp)
                                - ntohl(block->lastSRDelay));  // total round-trip delay in this interval
        avgRoundTripDelay = accumRoundTripDelay / packetReceived;  // average round-trip delay in this interval
        ++block;
    }
}
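The arithmetic in the report-block loop is the standard RFC 1889 round-trip estimate: if A is the arrival time of this report, LSR the echoed "last SR timestamp", and DLSR the delay the peer held the SR before reporting, then

RTT = A - LSR - DLSR

which is exactly (nowNtp1 - thenNtp) - ntohl(block->lastSRDelay) above. All three quantities are expressed in the middle 32 bits of the 64-bit NTP timestamp (low 16 bits of the seconds, high 16 bits of the fraction), which is why readSR masks and shifts the way it does.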

3.2.8.3 OpAck: Handling the ACK Message:

OpAck is the operator that runs after the callee receives the ACK message; we introduce it here ahead of time.
const Sptr < State >
OpAck::process( const Sptr < SipProxyEvent > event )
{
    ... ...
    if ( sipMsg->getType() != SIP_ACK )
    {
        return 0;
    }

    Sptr < UaCallInfo > call;
    call.dynamicCast( event->getCallInfo() );
    assert( call != 0 );
    ... ...
    // Once the ACK and its SDP have arrived, extract the peer's SDP from
    // UaCallInfo and open the RTP/RTCP audio channel; the mechanics of this
    // step can be seen in OpStartAudioDuplex::process.
    Sptr < SipSdp > remoteSdp = call->getRemoteSdp();
    startAudio( localSdp, remoteSdp );
    ... ...
    return 0;
}

3.2.8.4 OpConfTargetOk: Conference Detection:

  OpConfTargetOk implements the detection mechanism used during multi-party conferences. It is not used in the current setup, so there is no need to describe it.

  OpFwdDigit: once the RTP/RTCP media channel has been opened, if call transfer is configured, pressing one of the buttons 0-9 sends the digit through the following path:

UaDevice::getDeviceQueue()->add( signal ) --> ResGwDevice::processSessionMsg -->
case HardwareSignalType: ... --> provideSignal --> provideDtmf --> OpAddDigit::process --> UaDevice::instance()->getDigitCollector()->addDigit( digit )

  The dialed digits are stored in the DigitCollector. If the transfer mode is active, addOperator( new OpSecondCall ) is added to the operator queue; in this newly added operator a fresh INVITE is sent to the new party (the callee's URL is built from the collected digits), which is how a call is transferred from one endpoint to another.

3.2.9 Call Waiting:

  Call waiting is one of the more useful features of a SIP phone system, and RFC 2543 gives it some coverage. The main technique is to send the party to be held an INVITE in mid-call whose SDP has the address in its c= line changed to "0.0.0.0", with the CSeq incremented by 1 to distinguish it from the previous INVITE; this suppresses the media stream at the held end.
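As a sketch, the body of such a hold re-INVITE might carry an SDP like the following (the addresses, ports, and session identifiers are made-up example values); the c= line of 0.0.0.0 is what tells the peer to stop sending media, per the RFC 2543-era convention:

v=0
o=UserB 2890844526 2890844527 IN IP4 host.example.com
s=-
c=IN IP4 0.0.0.0
t=0 0
m=audio 10000 RTP/AVP 0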

Let us look at the flow:


(Diagram 17: call-waiting message flow)

3.2.9.1 A Detailed Description of Call Waiting (using Diagram 17 as the example):

a. Endpoints A and B are talking over RTP/RTCP;
b. B receives a call (an INVITE message) from C. B is now in the OpRing state; B sends Ringing to C to indicate that C's call has been received, leaving C waiting for B to pick up;
c. B enters the OpStartCallWaiting state. In this state it watches for the DeviceEventFlash signal (hook flash) from the terminal; on flash, the current A-B RTP session is put into the hold state, and B pushes the current call's ID onto the CallWaitingId queue to wait;
d. In OpStartCallWaiting, B sends A a re-INVITE whose SDP has the c= address changed to "0.0.0.0". A is then in the OpReinvite state; the conversation with B is paused and A enters the StateOnHold state;
e. B and C start talking;
f. C hangs up and sends a BYE message to B; B enters the OpEndCall state;
g. In that state B detects the waiting call and enters OpConvertCW, taking the waiting CallId from the queue and putting it into the myevent queue ready to run; if a DeviceEventFlash signal from the terminal is caught at this point, the OpRevert operation sends A a re-INVITE to resume the conversation with A;
h. A and B are talking again.

3.2.9.2 A Race Between Operators:

  From the above, there is a certain race between operators. After the A-B conversation is terminated, it enters the StateOnHold state; likewise, while the B-C conversation is in the StateInCall state the user may also emit a DeviceEventFlash message, forcing B back into StateOnHold rather than doing so only after the peer's BYE. The result is that B can get stuck in StateOnHold with no way back. The fix is actually very simple, just change:

  addOperator( new OpRevert ) to
  addEntryOperator( new OpRevert );
  Why this change? It puts OpRevert in a different queue, so the transition from StateInCall to StateOnHold no longer hinges on a lone FlashEvent condition; the state change has to go through State::process, which adds one more constraint. Readers who find this unclear can study the code of State::process(...) closely.

3.2.9.3 Modules Involved in the Call:

  Below is a brief introduction to some of the modules and methods involved in call waiting:
a. The use of OpStartCallWaiting:
OpStartCallWaiting mainly checks for the DeviceEventFlash signal that triggers call waiting; it switches the current conversation into the waiting state and the waiting one into the current conversation, and sends the party being held a re-INVITE hold message.
const Sptr < State >
OpStartCallWaiting::process( const Sptr < SipProxyEvent > event )
{
    // At this moment C is calling B; if B wants to switch the call to C,
    // pressing "f" (hook flash) triggers the switch.
    if ( deviceEvent->type != DeviceEventFlash )
    {
        return 0;
    }
    // Note that by now the CallId of C's call has already been put into the
    // call-waiting list, ready to be picked up.
    Sptr < SipCallId > call2Id = UaDevice::instance()->getCallWaitingId();
    if ( call2Id == 0 )
    {
        // no call on call waiting
        return 0;
    }

    if ( UaConfiguration::instance()->getCallWaitingOn() )
    {
        // Notify the waiting call that it is about to start talking to B: its
        // CallId is hung onto the myeventQ queue, and SipThread processes the
        // messages in that queue.
        Sptr < UaCallContainer > calls;
        calls.dynamicCast( event->getCallContainer() );
        assert( calls != 0 );
        Sptr < UaCallInfo > call2 = calls->findCall( *call2Id );

        Sptr < Fifo < Sptr < SipProxyEvent > > > eventQ = deviceEvent->getFifo();
        Sptr < UaDeviceEvent > event = new UaDeviceEvent( eventQ );
        event->type = DeviceEventFlash;
        event->callId = call2Id;
        eventQ->add( event );

        // Put the current A-B conversation into the waiting queue.
        Sptr < SipCallId > callId = UaDevice::instance()->getCallId();
        UaDevice::instance()->setCallId( 0 );
        UaDevice::instance()->addCallWaitingId( callId );
    }
    // Prepare the INVITE to send back to A, with the SDP "c=" set to 0.0.0.0
    // and the CSeq incremented from the previous one.
    Sptr < UaCallInfo > call;
    call.dynamicCast( event->getCallInfo() );
    assert( call != 0 );

    // Put current contact on hold
    Sptr < InviteMsg > reInvite;

    Sptr < Contact > contact = call->getContact();
    assert( contact != 0 );

    int status = contact->getStatus();
    if ( status == 200 )
    {
        // When we were the caller, this INVITE is very easy to build: for the
        // most part it suffices to copy parts of the previous message.
        const StatusMsg& msg = contact->getStatusMsg();
        if ( &msg != 0 )
        {
            reInvite = new InviteMsg( msg );

            //add SDP
            Sptr < SipSdp > localSdp = call->getLocalSdp();
            assert( localSdp != 0 );
            SipSdp sipSdp = *localSdp;
            reInvite->setContentData( &sipSdp );
        }
        … …
    }
    else
    {
        // When we were the callee, this INVITE is more troublesome and has to
        // be built essentially from scratch.
        const InviteMsg& msg = contact->getInviteMsg();
        if ( &msg != 0 )
        {
            string sipPort = UaConfiguration::instance()->getLocalSipPort();
            reInvite = new InviteMsg( msg.getFrom().getUrl(),
                                      atoi( sipPort.c_str() ) );
            SipFrom from( msg.getTo().getUrl() );
            reInvite->setFrom( from );

            reInvite->setCallId( msg.getCallId() );

            // Convert RecordRoute to reverse Route
            int numRecordRoute = msg.getNumRecordRoute();
            SipRecordRoute recordroute;
            SipRoute route;

            for ( int i = 0; i < numRecordRoute; i++ )
            {
                recordroute = msg.getRecordRoute( i );
                route.setUrl( recordroute.getUrl() );
                reInvite->setRoute( route ); // to beginning
            }

            int numContact = msg.getNumContact();
            if ( numContact )
            {
                SipContact contact = msg.getContact( numContact - 1 );
                route.setUrl( contact.getUrl() );
                reInvite->setRoute( route ); // to beginning
            }
        }
    }
    assert( reInvite != 0 );

    SipVia sipVia;
    sipVia.setprotoVersion( "2.0" );
    sipVia.setHost( Data( theSystem.gethostAddress() ) );
    sipVia.setPort( atoi( UaConfiguration::instance()->getLocalSipPort().c_str() ) );
    reInvite->flushViaList();
    reInvite->setVia( sipVia, 0 );

    // Set Contact: header
    Sptr< SipUrl > myUrl = new SipUrl;
    myUrl->setUserValue( UaConfiguration::instance()->getUserName(), "phone" );
    myUrl->setHost( Data( theSystem.gethostAddress() ) );
    myUrl->setPort( atoi( UaConfiguration::instance()->getLocalSipPort().c_str() ) );
    SipContact me;
    me.setUrl( myUrl );
    reInvite->setNumContact( 0 ); // Clear
    reInvite->setContact( me );

    //TODO Is it going to be a problem if the other side also uses the next
    //TODO CSeq at the same time?
    unsigned int cseq = contact->getCSeqNum();
    contact->setCSeqNum( ++cseq );
    SipCSeq sipCSeq = reInvite->getCSeq();
    sipCSeq.setCSeq( cseq );
    reInvite->setCSeq( sipCSeq );

    Sptr<SipSdp> sipSdp;
    sipSdp.dynamicCast ( reInvite->getContentData( 0 ) );
    assert ( sipSdp != 0 );
    SdpSession sdpDesc = sipSdp->getSdpDescriptor();
    // Set the SDP hold flag (c=0.0.0.0) here.
    sdpDesc.setHold();
    sipSdp->setSdpDescriptor( sdpDesc );
    // Send the re-INVITE to A.
    deviceEvent->getSipStack()->sendAsync( *reInvite );
    Sptr < UaStateMachine > stateMachine;
    stateMachine.dynamicCast( event->getCallInfo()->getFeature() );
    assert( stateMachine != 0 );
    // Transition to StateOnhold.
    return stateMachine->findState( "StateOnhold" );
}
b. OpReinvite:
On receiving an INVITE from the peer, OpReinvite stores the remote SDP carried in the message into the local UaCallInfo and then sends an OK back to the peer;
c. OpEndCall:
When a BYE message is detected, OpEndCall has the program send back OK and enter the StateCallEnded state;
d. OpRevert:
When OpRevert detects a further DeviceEventFlash message, the local side sends an INVITE that wakes up the call the held party has waiting;
e. The StateCallEnded state:
StateCallEnded likewise watches for the DeviceEventFlash message; once one is detected, it invokes the OpConvertCW operation to wake up the call sitting in the waiting queue.

(To be continued)


Author contact: [email protected]

Contributed by the author; edited by CTI Forum