Streaming real-time H.264 video and MP3 audio with live555

The idea is to use live555's RTSP server to send real-time video and audio streams, and to enlarge live555's video buffers so that large video frames are not dropped.

Real-time H.264 video stream

The H.264 streaming here follows https://blog.csdn.net/caoshangpa/article/details/53200527

The key change is in the server setup: in sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource)), replace H264VideoFileServerMediaSubsession with your own subsession class.

In its createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) function, H264VideoFileServerMediaSubsession calls ByteStreamFileSource::createNew(envir(), fFileName), and the actual frame acquisition happens in ByteStreamFileSource's doGetNextFrame() function. So the approach is to subclass both H264VideoFileServerMediaSubsession and ByteStreamFileSource, override createNewStreamSource and doGetNextFrame respectively, and replace the data acquisition in doGetNextFrame with your own source.
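For reference, the stock call in testOnDemandRTSPServer.cpp and its replacement look like this (the custom class is implemented below):

//stock demo: streams an H.264 file from disk
sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource));
//replacement: streams live frames through the custom subsession
sms->addSubsession(H264LiveVideoServerMediaSubssion::createNew(*env, reuseFirstSource));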

Code

The RTSP server setup in main.cpp

    //create the task scheduler and initialize the usage environment
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
    UserAuthenticationDatabase* authDB = NULL;
    //create the RTSP server and start listening for client connections
    //note that the port here is not the default 554, so it must be specified in the URL
    RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);

    if (rtspServer == NULL)
    {
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
        exit(1);
    }

    char const* descriptionString = "Session streamed by \"video\"";

    //stream name (media name)
    char const* streamName = "video";

    //when a client requests playback, it supplies the stream name streamName to tell the RTSP server which stream to play.
    //create the media session; the mapping between stream name and source is established by adding subsessions.
    //the media session manages session-related information such as the description, duration and stream name.
    //2nd parameter: media name; 3rd: media info; 4th: media description
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    OutPacketBuffer::maxSize = 90000; //enlarged send buffer, see the last section

    //replaced with our own H264LiveVideoServerMediaSubssion
    H264LiveVideoServerMediaSubssion* sub = H264LiveVideoServerMediaSubssion::createNew(*env, reuseFirstSource);
    sms->addSubsession(sub);


    Mp3LiveServerMediaSubssion* subAudio = Mp3LiveServerMediaSubssion::createNew(*env, reuseFirstSource);
    sms->addSubsession(subAudio);


    //add the session to the RTSP server
    rtspServer->addServerMediaSession(sms);

    qInfo()<<"\n url"<<rtspServer->rtspURL(sms)<<"\n stream"<<streamName<<"\n";
    *env<<"\n url"<<rtspServer->rtspURL(sms)<<"\n stream"<<streamName<<"\n";

    //start the application's own video capture (media is this application's capture object)
    media->startVideo();
    //enter the event loop; socket reads and the delayed sending of media data are all handled in this loop.
    env->taskScheduler().doEventLoop();
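One detail worth noting: rtspURL() returns a newly allocated string each time it is called, and the caller is expected to delete[] it. A leak-free version of the logging above could look like this:

    char* url = rtspServer->rtspURL(sms);
    *env << "\n url " << url << "\n stream " << streamName << "\n";
    delete[] url; //rtspURL() allocates the string; the caller must free it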

    

h264livevideoservermediasubssion.h

#ifndef H264LIVEVIDEOSERVERMEDIASUBSSION_H
#define H264LIVEVIDEOSERVERMEDIASUBSSION_H

#include "H264VideoFileServerMediaSubsession.hh"

class H264LiveVideoServerMediaSubssion : public H264VideoFileServerMediaSubsession {

public:
    static H264LiveVideoServerMediaSubssion* createNew(UsageEnvironment& env, Boolean reuseFirstSource);

protected:
    H264LiveVideoServerMediaSubssion(UsageEnvironment& env, Boolean reuseFirstSource);
    ~H264LiveVideoServerMediaSubssion();


protected:
    //override of the base class's virtual function
    FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
};


#endif // H264LIVEVIDEOSERVERMEDIASUBSSION_H

h264livevideoservermediasubssion.cpp

#include "h264livevideoservermediasubssion.h"
#include "h264LiveFramedSource.h"
#include "H264VideoStreamFramer.hh"

H264LiveVideoServerMediaSubssion* H264LiveVideoServerMediaSubssion::createNew(UsageEnvironment& env, Boolean reuseFirstSource)
{
    return new H264LiveVideoServerMediaSubssion(env, reuseFirstSource);
}

H264LiveVideoServerMediaSubssion::H264LiveVideoServerMediaSubssion(UsageEnvironment& env, Boolean reuseFirstSource)
: H264VideoFileServerMediaSubsession(env, 0, reuseFirstSource)
{

}

H264LiveVideoServerMediaSubssion::~H264LiveVideoServerMediaSubssion()
{
}

FramedSource* H264LiveVideoServerMediaSubssion::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)
{
    //estimated bitrate in kbps; adjust to match the actual encoder output
    estBitrate = 100; // kbps
    //create the live video source

    H264LiveFramedSource* liveSource = H264LiveFramedSource::createNew(envir());
    if (liveSource == NULL)
    {
        return NULL;
    }

    //create the framer that parses the H.264 stream from the source
    return H264VideoStreamFramer::createNew(envir(), liveSource);
}
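A side note on the last line: H264VideoStreamFramer expects a raw Annex-B byte stream (with start codes) and parses NAL units out of it. If the live source instead delivers exactly one NAL unit per doGetNextFrame() call, without start codes, live555's H264VideoStreamDiscreteFramer is the usual alternative; only the return statement would change:

//requires #include "H264VideoStreamDiscreteFramer.hh"
//use when each delivered frame is a single NAL unit without a start code
return H264VideoStreamDiscreteFramer::createNew(envir(), liveSource);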

h264liveframedsource.h

#ifndef _H264LIVEFRAMEDSOURCE_H
#define _H264LIVEFRAMEDSOURCE_H


#include "ByteStreamFileSource.hh"
#include "UsageEnvironment.hh"

class H264LiveFramedSource : public ByteStreamFileSource
{
public:
    static H264LiveFramedSource* createNew(UsageEnvironment& env, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0);
    unsigned int maxFrameSize() const;

protected:
    H264LiveFramedSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame);
    ~H264LiveFramedSource();


private:
    //override of the base class's virtual function
    virtual void doGetNextFrame();
};

#endif

h264liveframedsource.cpp

#include "h264LiveFramedSource.h"
#include "GroupsockHelper.hh"
#include <QByteArray>


H264LiveFramedSource::H264LiveFramedSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame)
: ByteStreamFileSource(env, 0, preferredFrameSize, playTimePerFrame)
{

}

H264LiveFramedSource* H264LiveFramedSource::createNew(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame)
{
    H264LiveFramedSource* newSource = new H264LiveFramedSource(env, preferredFrameSize, playTimePerFrame);
    return newSource;
}

H264LiveFramedSource::~H264LiveFramedSource()
{

}

// This function is called when new frame data is available from the device.
// We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
// 'in' parameters (these should *not* be modified by this function):
//     fTo: The frame data is copied to this address.
//         (Note that the variable "fTo" is *not* modified.  Instead,
//          the frame data is copied to the address pointed to by "fTo".)
//     fMaxSize: This is the maximum number of bytes that can be copied
//         (If the actual frame is larger than this, then it should
//          be truncated, and "fNumTruncatedBytes" set accordingly.)
// 'out' parameters (these are modified by this function):
//     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
//     fNumTruncatedBytes: Should be set iff the delivered frame would have been
//         bigger than "fMaxSize", in which case it's set to the number of bytes
//         that have been omitted.
//     fPresentationTime: Should be set to the frame's presentation time
//         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
//         by calling "gettimeofday()".
//     fDurationInMicroseconds: Should be set to the frame's duration, if known.
//         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
//         to set this variable, because - in this case - data will never arrive 'early'.


void H264LiveFramedSource::doGetNextFrame()
{
    QByteArray data;
    //fetch your own real-time H.264 data here, e.g.:
    // media_->getData(data);

    fFrameSize = data.size();
    if (fFrameSize > fMaxSize)
    {
        fNumTruncatedBytes = fFrameSize - fMaxSize;
        fFrameSize = fMaxSize;
        envir() << "frame size " << fFrameSize << ", max size " << fMaxSize
                << ", truncated bytes " << fNumTruncatedBytes << "\n";
    }
    else
    {
        fNumTruncatedBytes = 0;
    }

    if (data.size() != 0)
    {
        //copy the live data to the downstream buffer
        memmove(fTo, data.data(), fFrameSize);
    }

    gettimeofday(&fPresentationTime, NULL); //presentation timestamp

    //schedule afterGetting with a delay of 0; calling FramedSource::afterGetting(this) directly also works
    nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this);
}

//the value returned here must match the maximum BANK_SIZE (see the buffer section below)
unsigned int H264LiveFramedSource::maxFrameSize() const
{
    return 300000;
}
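In this skeleton, media_->getData(data) stands in for the application's own capture pipeline, which is not shown here. As a minimal sketch only (FrameQueue and its names are hypothetical, not part of live555 or of this post's code), the encoder thread could hand complete frames to doGetNextFrame() through a mutex-protected queue:

#include <QByteArray>
#include <QMutex>
#include <QQueue>

//Hypothetical hand-off between threads: the encoder thread calls push(),
//and doGetNextFrame() calls pop() on the live555 event-loop thread.
class FrameQueue
{
public:
    void push(const QByteArray& frame)
    {
        QMutexLocker locker(&mutex_);
        queue_.enqueue(frame);
    }

    //returns an empty array when no frame is ready yet
    QByteArray pop()
    {
        QMutexLocker locker(&mutex_);
        return queue_.isEmpty() ? QByteArray() : queue_.dequeue();
    }

private:
    QMutex mutex_;
    QQueue<QByteArray> queue_;
};

With something like this in place, the elided part of doGetNextFrame() reduces to QByteArray data = frameQueue.pop();, and an empty result is handled just as the code above already handles a zero-size frame: nothing is copied and the rescheduled task polls again.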

 

MP3 audio stream

I originally tried adapting https://blog.csdn.net/taixinlfx/article/details/8854440, but never got it working.

I then used the same approach as for H.264: the testOnDemandRTSPServer.cpp demo in the testProgs directory shows that the subsession class for MP3 is MP3AudioFileServerMediaSubsession, which can be adapted in the same way.

When creating the server, replace MP3AudioFileServerMediaSubsession in sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource)) with your own subsession class.

MP3AudioFileServerMediaSubsession calls ByteStreamFileSource::createNew in its createNewStreamSource function, and frame acquisition again happens in ByteStreamFileSource's doGetNextFrame(). So, as before, subclass MP3AudioFileServerMediaSubsession and ByteStreamFileSource, override createNewStreamSource and doGetNextFrame, and replace the data acquisition in doGetNextFrame with your own source.

This approach should therefore also work for adapting real-time streams of other formats.

Code:

mp3liveservermediasubssion.h

#ifndef MP3LIVESERVERMEDIASUBSSION_H
#define MP3LIVESERVERMEDIASUBSSION_H
#include "MP3AudioFileServerMediaSubsession.hh"
class Mp3LiveServerMediaSubssion : public MP3AudioFileServerMediaSubsession
{
public:
    static Mp3LiveServerMediaSubssion* createNew(UsageEnvironment& env, Boolean reuseFirstSource);

protected:
    Mp3LiveServerMediaSubssion(UsageEnvironment& env, Boolean reuseFirstSource);
    ~Mp3LiveServerMediaSubssion();

protected:
    //override of the base class's virtual function
    FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
};

#endif // MP3LIVESERVERMEDIASUBSSION_H

mp3liveservermediasubssion.cpp

#include "mp3liveservermediasubssion.h"
#include "mp3liveframedsource.h"


Mp3LiveServerMediaSubssion *Mp3LiveServerMediaSubssion::createNew(UsageEnvironment &env, Boolean reuseFirstSource)
{
    return new Mp3LiveServerMediaSubssion(env, reuseFirstSource);
}


Mp3LiveServerMediaSubssion::Mp3LiveServerMediaSubssion(UsageEnvironment &env, Boolean reuseFirstSource)
: MP3AudioFileServerMediaSubsession(env, 0, reuseFirstSource, false, NULL)
{
}

Mp3LiveServerMediaSubssion::~Mp3LiveServerMediaSubssion()
{
}

FramedSource* Mp3LiveServerMediaSubssion::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)
{
    //estimated bitrate in kbps; adjust to match the actual encoder output
    estBitrate = 100; // kbps
    //create the live audio source

    Mp3LiveFramedSource* liveSource = Mp3LiveFramedSource::createNew(envir());
    if (liveSource == NULL)
    {
        return NULL;
    }

    //hand the live source to the base class's common setup
    //(fileSize() is 0 here since no file was opened, so no fixed duration is derived)
    return createNewStreamSourceCommon(liveSource, liveSource->fileSize(), estBitrate);
}

mp3liveframedsource.h

#ifndef MP3LIVEFRAMEDSOURCE_H
#define MP3LIVEFRAMEDSOURCE_H
#include "ByteStreamFileSource.hh"
#include "UsageEnvironment.hh"
#include "GroupsockHelper.hh"

class Mp3LiveFramedSource : public ByteStreamFileSource
{
public:
    static Mp3LiveFramedSource* createNew(UsageEnvironment& env, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0);
    unsigned int maxFrameSize() const;

protected:
    Mp3LiveFramedSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame);
    ~Mp3LiveFramedSource();


private:
    //override of the base class's virtual function
    virtual void doGetNextFrame();
};

#endif // MP3LIVEFRAMEDSOURCE_H

mp3liveframedsource.cpp

#include "mp3liveframedsource.h"

Mp3LiveFramedSource *Mp3LiveFramedSource::createNew(UsageEnvironment &env, unsigned preferredFrameSize, unsigned playTimePerFrame)
{
    Mp3LiveFramedSource* newSource = new Mp3LiveFramedSource(env, preferredFrameSize, playTimePerFrame);
    return newSource;
}


Mp3LiveFramedSource::Mp3LiveFramedSource(UsageEnvironment &env, unsigned preferredFrameSize, unsigned playTimePerFrame)
: ByteStreamFileSource(env, 0, preferredFrameSize, playTimePerFrame)
{

}

Mp3LiveFramedSource::~Mp3LiveFramedSource()
{

}

void Mp3LiveFramedSource::doGetNextFrame()
{
    QByteArray data;
    //fetch your own real-time audio data here, e.g.:
    //media_->getDataAudio(data);
    // ...

    fFrameSize = data.size();
    if (fFrameSize > fMaxSize)
    {
        fNumTruncatedBytes = fFrameSize - fMaxSize;
        fFrameSize = fMaxSize;
        envir() << "frame size " << fFrameSize << ", max size " << fMaxSize
                << ", truncated bytes " << fNumTruncatedBytes << "\n";
    }
    else
    {
        fNumTruncatedBytes = 0;
    }

    if (data.size() != 0)
    {
        memmove(fTo, data.data(), fFrameSize);
    }

    gettimeofday(&fPresentationTime, NULL); //presentation timestamp
    //deliver the next frame after 40 ms, roughly one MP3 frame interval
    nextTask() = envir().taskScheduler().scheduleDelayedTask(40000, (TaskFunc*)FramedSource::afterGetting, this);
}

//as with the video source, match the BANK_SIZE maximum
unsigned int Mp3LiveFramedSource::maxFrameSize() const
{
    return 300000;
}
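The 40000 µs delay passed to scheduleDelayedTask() above approximates one MP3 frame interval. An MPEG-1 Layer III frame carries 1152 samples, so the exact interval depends on the sample rate; a closer pacing could be computed like this (sampleRate is a hypothetical member holding the stream's sample rate):

//MPEG-1 Layer III: 1152 samples per frame, so the frame interval is
//1152 / 32000 Hz = 36000 us, or 1152 / 44100 Hz ≈ 26122 us
unsigned frameDurationUs = (unsigned)(1152.0 * 1000000.0 / sampleRate + 0.5);
nextTask() = envir().taskScheduler().scheduleDelayedTask(frameDurationUs,
                 (TaskFunc*)FramedSource::afterGetting, this);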

 

Enlarging the video buffers

When the video being streamed has a high resolution, live555 needs some adjustments.

 

1. Increase OutPacketBuffer::maxSize on the sending side. live555's default OutPacketBuffer size is only 60000 bytes, which can cause outgoing data to be dropped and the receiver's picture to break up, so call the following in main():

OutPacketBuffer::maxSize = 90000;

If the value is too small, you may see an error like this (here the live555 default was used, without setting the value):

MultiFramedRTPSink::afterGettingFrame1(): The input frame data was too large for our buffer size (61140).  12748 bytes of trailing data was dropped!  Correct this by increasing "OutPacketBuffer::maxSize" to at least 72748, *before* creating this 'RTPSink'.  (Current value is 60000.)

 

2. Enlarge the frame-parsing buffer, i.e. BANK_SIZE, whose default is 150 KB (150000). The video frames being sent here are large, so it is set to 300 KB; otherwise frames that exceed the buffer size are discarded by live555.

This value lives in StreamParser.cpp in the live555 source; change it to #define BANK_SIZE 300000 and rebuild live555.

 

3. Override ByteStreamFileSource's maxFrameSize() so that it returns the BANK_SIZE maximum directly, as the maxFrameSize() overrides above do.
