Streaming real-time H.264 video and MP3 audio with live555

The idea is to use a live555 RTSP server to send real-time video and audio streams, and to enlarge the video buffer capacity so that large video frames do not lose data.

Real-time H.264 video stream

The H.264 video part follows https://blog.csdn.net/caoshangpa/article/details/53200527.

The key point is that when creating the server, in sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource)), you replace H264VideoFileServerMediaSubsession with your own subsession class.

H264VideoFileServerMediaSubsession calls ByteStreamFileSource::createNew(envir(), fFileName) in its createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) function, and the actual frame fetching happens in ByteStreamFileSource's doGetNextFrame(). So we derive from H264VideoFileServerMediaSubsession and ByteStreamFileSource, override createNewStreamSource and doGetNextFrame respectively, and replace the data fetching in doGetNextFrame with our own source.

Code

Code in main.cpp that creates the RTSP server:

    //Create the task scheduler and the usage environment
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
    UserAuthenticationDatabase* authDB = NULL;
    //Create the RTSP server and start listening for client connections.
    //Note: the port here is not the default 554, so the URL must include it.
    RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);

    if (rtspServer == NULL)
    {
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
        exit(1);
    }

    char const* descriptionString = "Session streamed by \"video\"";

    //Stream name (media name)
    char const* streamName = "video";

    //When a client requests playback it supplies the stream name, telling the server which stream to play.
    //Create the media session; the mapping between stream name and source is established by adding subsessions.
    //ServerMediaSession manages session-level information such as the description, duration and stream name.
    //2nd parameter: media name; 3rd: media info; 4th: media description
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    //Enlarge the output packet buffer; see "Enlarging the video buffer capacity" below
    OutPacketBuffer::maxSize = 90000;

    //Reuse the first source for all clients; typical for a shared live stream
    Boolean reuseFirstSource = True;

    //Use our own H264LiveVideoServerMediaSubssion instead of the file-based one
    H264LiveVideoServerMediaSubssion* sub = H264LiveVideoServerMediaSubssion::createNew(*env, reuseFirstSource);
    sms->addSubsession(sub);


    //Our MP3 audio subsession, implemented the same way (see below)
    Mp3LiveServerMediaSubssion* subAudio = Mp3LiveServerMediaSubssion::createNew(*env, reuseFirstSource);
    sms->addSubsession(subAudio);


    //Register the media session with the RTSP server
    rtspServer->addServerMediaSession(sms);

    char* url = rtspServer->rtspURL(sms);
    qInfo() << "\n url" << url << "\n stream" << streamName << "\n";
    *env << "\n url" << url << "\n stream" << streamName << "\n";
    delete[] url; //rtspURL() allocates the string; the caller must free it

    //Start the application's own capture/encode pipeline (application-specific object)
    media->startVideo();
    //Enter the event loop; socket reads and delayed media sends are all driven from here
    env->taskScheduler().doEventLoop();
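With the server running, the stream can be opened in any RTSP client (VLC, ffplay, etc.) at rtsp://<host>:8554/video, matching the port and stream name configured above.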

    

h264livevideoservermediasubssion.h

#ifndef H264LIVEVIDEOSERVERMEDIASUBSSION_H
#define H264LIVEVIDEOSERVERMEDIASUBSSION_H

#include "H264VideoFileServerMediaSubsession.hh"

class H264LiveVideoServerMediaSubssion : public H264VideoFileServerMediaSubsession {

public:
    static H264LiveVideoServerMediaSubssion* createNew(UsageEnvironment& env, Boolean reuseFirstSource);

protected:
    H264LiveVideoServerMediaSubssion(UsageEnvironment& env, Boolean reuseFirstSource);
    ~H264LiveVideoServerMediaSubssion();


protected:
    //Override the virtual function
    FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
};


#endif // H264LIVEVIDEOSERVERMEDIASUBSSION_H

h264livevideoservermediasubssion.cpp

#include "h264livevideoservermediasubssion.h"
#include "h264LiveFramedSource.h"
#include "H264VideoStreamFramer.hh"

H264LiveVideoServerMediaSubssion* H264LiveVideoServerMediaSubssion::createNew(UsageEnvironment& env, Boolean reuseFirstSource)
{
    return new H264LiveVideoServerMediaSubssion(env, reuseFirstSource);
}

H264LiveVideoServerMediaSubssion::H264LiveVideoServerMediaSubssion(UsageEnvironment& env, Boolean reuseFirstSource)
: H264VideoFileServerMediaSubsession(env, 0, reuseFirstSource) //pass 0 as the file name; we never read from a file
{

}

H264LiveVideoServerMediaSubssion::~H264LiveVideoServerMediaSubssion()
{
}

FramedSource* H264LiveVideoServerMediaSubssion::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)
{
    //Estimated bitrate in kbps; remember to adjust it to your encoder
    estBitrate = 100; // kbps

    //Create the live video source
    H264LiveFramedSource* liveSource = H264LiveFramedSource::createNew(envir());
    if (liveSource == NULL)
    {
        return NULL;
    }

    //Wrap the live source in a framer for the H.264 stream
    return H264VideoStreamFramer::createNew(envir(), liveSource);
}
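Note: H264VideoStreamFramer expects an Annex-B byte stream, i.e. NAL units separated by start codes. If your encoder callback instead hands over one complete NAL unit at a time without start codes, live555's H264VideoStreamDiscreteFramer is usually the appropriate wrapper instead.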

h264liveframedsource.h

#ifndef _H264LIVEFRAMEDSOURCE_H
#define _H264LIVEFRAMEDSOURCE_H


#include "ByteStreamFileSource.hh"
#include "UsageEnvironment.hh"

class H264LiveFramedSource : public ByteStreamFileSource
{
public:
    static H264LiveFramedSource* createNew(UsageEnvironment& env, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0);
    unsigned int maxFrameSize() const;

protected:
    H264LiveFramedSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame);
    ~H264LiveFramedSource();


private:
    //Override the virtual function
    virtual void doGetNextFrame();
};

#endif

h264liveframedsource.cpp:

#include "h264LiveFramedSource.h"
#include "GroupsockHelper.hh"
#include <QByteArray>


H264LiveFramedSource::H264LiveFramedSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame)
: ByteStreamFileSource(env, 0, preferredFrameSize, playTimePerFrame)
{

}

H264LiveFramedSource* H264LiveFramedSource::createNew(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame)
{
    H264LiveFramedSource* newSource = new H264LiveFramedSource(env, preferredFrameSize, playTimePerFrame);
    return newSource;
}

H264LiveFramedSource::~H264LiveFramedSource()
{

}

// This function is called when new frame data is available from the device.
// We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
// 'in' parameters (these should *not* be modified by this function):
//     fTo: The frame data is copied to this address.
//         (Note that the variable "fTo" is *not* modified.  Instead,
//          the frame data is copied to the address pointed to by "fTo".)
//     fMaxSize: This is the maximum number of bytes that can be copied
//         (If the actual frame is larger than this, then it should
//          be truncated, and "fNumTruncatedBytes" set accordingly.)
// 'out' parameters (these are modified by this function):
//     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
//     fNumTruncatedBytes: Should be set iff the delivered frame would have been
//         bigger than "fMaxSize", in which case it's set to the number of bytes
//         that have been omitted.
//     fPresentationTime: Should be set to the frame's presentation time
//         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
//         by calling "gettimeofday()".
//     fDurationInMicroseconds: Should be set to the frame's duration, if known.
//         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
//         to set this variable, because - in this case - data will never arrive 'early'.


void H264LiveFramedSource::doGetNextFrame()
{

    QByteArray data;
    //Fetch your own real-time H.264 data here, e.g.:
    // media_->getData(data);
    

    fFrameSize = data.size();
    if (fFrameSize > fMaxSize)
    {
        fNumTruncatedBytes = fFrameSize - fMaxSize;
        fFrameSize = fMaxSize;
        envir() << "frame size " << fFrameSize << ", max size " << fMaxSize << ", truncated bytes " << fNumTruncatedBytes << "\n";
    }
    else
    {
        fNumTruncatedBytes = 0;
    }


    if (data.size() != 0)
    {
        //Copy the captured data to the downstream object
        memmove(fTo, data.data(), fFrameSize);
    }

    gettimeofday(&fPresentationTime, NULL); //presentation timestamp, aligned with wall-clock time

    //Schedule afterGetting with a 0 µs delay; calling FramedSource::afterGetting(this) directly also works here
    nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this);


}

//Report the maximum frame size, matching the enlarged BANK_SIZE (see the last section)
unsigned int H264LiveFramedSource::maxFrameSize() const
{
    return 300000;
}
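A side note on the zero-delay rescheduling above: it effectively polls for data on every pass through the event loop. live555's own liveMedia/DeviceSource.cpp template suggests an event-trigger pattern instead, where the capture thread wakes the event loop only when a frame is actually ready. Below is a minimal sketch of that pattern; the class and callback names are illustrative, not part of the code above.

//Sketch of live555's event-trigger pattern (cf. liveMedia/DeviceSource.cpp).
//"EventDrivenSource" and "deliverFrame0" are hypothetical names for this example.
#include "FramedSource.hh"

class EventDrivenSource : public FramedSource
{
public:
    static EventTriggerId eventTriggerId; //shared trigger id for this class

protected:
    EventDrivenSource(UsageEnvironment& env) : FramedSource(env)
    {
        if (eventTriggerId == 0)
            eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
    }

    virtual void doGetNextFrame()
    {
        //Nothing to do here; the capture thread signals new data with:
        //  envir().taskScheduler().triggerEvent(EventDrivenSource::eventTriggerId, this);
    }

private:
    //Runs on the event-loop thread once the trigger fires
    static void deliverFrame0(void* clientData)
    {
        ((EventDrivenSource*)clientData)->deliverFrame();
    }

    void deliverFrame()
    {
        if (!isCurrentlyAwaitingData()) return; //the sink has not asked for data yet
        //...copy the pending frame into fTo, set fFrameSize/fPresentationTime...
        FramedSource::afterGetting(this);
    }
};
EventTriggerId EventDrivenSource::eventTriggerId = 0;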

 

MP3 audio stream

I first tried adapting the approach from https://blog.csdn.net/taixinlfx/article/details/8854440, but never got it working.

I then used the same approach as for H.264: in the testOnDemandRTSPServer.cpp demo under the testProgs folder, the subsession class used for MP3 is MP3AudioFileServerMediaSubsession, which can be adapted in the same way.

When creating the server, in sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource)), replace MP3AudioFileServerMediaSubsession with your own subsession.

MP3AudioFileServerMediaSubsession calls ByteStreamFileSource::createNew in its createNewStreamSource function, and frames are again fetched in ByteStreamFileSource's doGetNextFrame(). So, as before, derive from MP3AudioFileServerMediaSubsession and ByteStreamFileSource, override createNewStreamSource and doGetNextFrame, and fetch the data from your own source in doGetNextFrame.

The same recipe should work for streaming other real-time formats as well; for example, AAC could start from the ADTSAudioFileServerMediaSubsession class used in the same demo.

Code:

mp3liveservermediasubssion.h

#ifndef MP3LIVESERVERMEDIASUBSSION_H
#define MP3LIVESERVERMEDIASUBSSION_H
#include "MP3AudioFileServerMediaSubsession.hh"
class Mp3LiveServerMediaSubssion : public MP3AudioFileServerMediaSubsession
{
public:
    static Mp3LiveServerMediaSubssion* createNew(UsageEnvironment& env, Boolean reuseFirstSource);

protected:
    Mp3LiveServerMediaSubssion(UsageEnvironment& env, Boolean reuseFirstSource);
    ~Mp3LiveServerMediaSubssion();

protected:
    //Override the virtual function
    FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
};

#endif // MP3LIVESERVERMEDIASUBSSION_H

mp3liveservermediasubssion.cpp

#include "mp3liveservermediasubssion.h"
#include "mp3liveframedsource.h"


Mp3LiveServerMediaSubssion *Mp3LiveServerMediaSubssion::createNew(UsageEnvironment &env, Boolean reuseFirstSource)
{
    return new Mp3LiveServerMediaSubssion(env, reuseFirstSource);
}


Mp3LiveServerMediaSubssion::Mp3LiveServerMediaSubssion(UsageEnvironment &env, Boolean reuseFirstSource)
: MP3AudioFileServerMediaSubsession(env, 0, reuseFirstSource, False, NULL) //no file name, no ADU generation, no interleaving
{
}

Mp3LiveServerMediaSubssion::~Mp3LiveServerMediaSubssion()
{
}

FramedSource* Mp3LiveServerMediaSubssion::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate)
{
    //Estimated bitrate in kbps; remember to adjust it to your encoder
    estBitrate = 100; // kbps

    //Create the live audio source
    Mp3LiveFramedSource* liveSource = Mp3LiveFramedSource::createNew(envir());
    if (liveSource == NULL)
    {
        return NULL;
    }

    //Let the base class wrap the source for MP3 streaming
    return createNewStreamSourceCommon(liveSource, liveSource->fileSize(), estBitrate);
}

mp3liveframedsource.h

#ifndef MP3LIVEFRAMEDSOURCE_H
#define MP3LIVEFRAMEDSOURCE_H
#include "ByteStreamFileSource.hh"
#include "UsageEnvironment.hh"
#include "GroupsockHelper.hh"

class Mp3LiveFramedSource : public ByteStreamFileSource
{
public:
    static Mp3LiveFramedSource* createNew(UsageEnvironment& env, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0);
    unsigned int maxFrameSize() const;

protected:
    Mp3LiveFramedSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame);
    ~Mp3LiveFramedSource();


private:
    //Override the virtual function
    virtual void doGetNextFrame();
};

#endif // MP3LIVEFRAMEDSOURCE_H

mp3liveframedsource.cpp

#include "mp3liveframedsource.h"

Mp3LiveFramedSource *Mp3LiveFramedSource::createNew(UsageEnvironment &env, unsigned preferredFrameSize, unsigned playTimePerFrame)
{
    Mp3LiveFramedSource* newSource = new Mp3LiveFramedSource(env, preferredFrameSize, playTimePerFrame);
    return newSource;
}


Mp3LiveFramedSource::Mp3LiveFramedSource(UsageEnvironment &env, unsigned preferredFrameSize, unsigned playTimePerFrame)
: ByteStreamFileSource(env, 0, preferredFrameSize, playTimePerFrame)
{

}

Mp3LiveFramedSource::~Mp3LiveFramedSource()
{

}

void Mp3LiveFramedSource::doGetNextFrame()
{
    QByteArray data;
    //Fetch your own real-time MP3 data here, e.g.:
    //media_->getDataAudio(data);
    //...
    
    fFrameSize = data.size();
    if (fFrameSize > fMaxSize)
    {
        fNumTruncatedBytes = fFrameSize - fMaxSize;
        fFrameSize = fMaxSize;
        envir() << "frame size " << fFrameSize << ", max size " << fMaxSize << ", truncated bytes " << fNumTruncatedBytes << "\n";
    }
    else
    {
        fNumTruncatedBytes = 0;
    }

    if (data.size() != 0)
    {
        //Copy the captured data to the downstream object
        memmove(fTo, data.data(), fFrameSize);
    }

    gettimeofday(&fPresentationTime, NULL); //presentation timestamp, aligned with wall-clock time
    //Schedule the next fetch 40 ms later; tune this to your encoder's actual frame duration
    //(an MPEG-1 Layer III frame at 44.1 kHz lasts about 26 ms)
    nextTask() = envir().taskScheduler().scheduleDelayedTask(40000, (TaskFunc*)FramedSource::afterGetting, this);
}

//Report the maximum frame size, matching the enlarged BANK_SIZE (see the next section)
unsigned int Mp3LiveFramedSource::maxFrameSize() const
{
    return 300000;
}

 

Enlarging the video buffer capacity

When the transmitted video has a high resolution, live555 needs some adjustments.

 

1. Increase OutPacketBuffer::maxSize on the sending side. The live555 default is only 60000 bytes, which can cause outgoing data to be dropped and the receiver to show a corrupted picture, so call the following in main():

OutPacketBuffer::maxSize = 90000;

If the value is too small, live555 prints an error like the one below (here with the default value, i.e. OutPacketBuffer::maxSize was never set). Note that the message itself states the minimum value required:

MultiFramedRTPSink::afterGettingFrame1(): The input frame data was too large for our buffer size (61140).  12748 bytes of trailing data was dropped!  Correct this by increasing "OutPacketBuffer::maxSize" to at least 72748, *before* creating this 'RTPSink'.  (Current value is 60000.)

 

2. Enlarge the frame-parsing buffer, BANK_SIZE. The default is 150 KB; because the transmitted video frames are larger, set it to 300 KB. Otherwise frames that exceed the buffer size are discarded by live555.

This value is defined in StreamParser.cpp in the live555 source. Change it to #define BANK_SIZE 300000 and rebuild live555.
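For reference, the edit is a single line in the live555 source tree:

//liveMedia/StreamParser.cpp -- enlarge the stream parser's buffer
#define BANK_SIZE 300000 //default is 150000; rebuild live555 after changing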

 

3. Override maxFrameSize() in the ByteStreamFileSource subclasses to return the BANK_SIZE maximum directly.
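This is the override already shown in the live-source classes above, e.g.:

//Report the maximum frame size, matching the enlarged BANK_SIZE
unsigned int H264LiveFramedSource::maxFrameSize() const
{
    return 300000;
}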
