用librtmp接收H264+AAC的例子代碼網上參考資源比較少,這份代碼來自www.cnblogs.com的某個博客,但是已經找不到原鏈接了。把代碼附上,幫助各位網友在開發時少走些彎路。
//////////////////////////////////////////////////////////
#include "librtmp/rtmp_sys.h"
#include "librtmp/log.h"
// Byte-order helpers for converting big-endian wire values to host order.
// NOTE(review): these macros assume a little-endian host (x86/Windows);
// on a big-endian host every use of them is wrong — confirm target platform.
#define HTON16(x) ((x>>8&0xff)|(x<<8&0xff00))
// 24-bit swap: exchanges byte 0 and byte 2, keeps the middle byte in place.
#define HTON24(x) ((x>>16&0xff)|(x<<16&0xff0000)|(x&0xff00))
#define HTON32(x) ((x>>24&0xff)|(x>>8&0xff00)|(x<<8&0xff0000)|(x<<24&0xff000000))
// FLV timestamp: swap the low 24 bits, keep the extended (highest) byte as-is.
#define HTONTIME(x) ((x>>16&0xff)|(x<<16&0xff0000)|(x&0xff00)|(x&0xff000000))
#pragma comment(lib, "librtmp.lib")  // MSVC-only: auto-link librtmp
#pragma pack(1)  // byte-exact layout: these structs are read straight off the wire
// 9-byte FLV file header (FLV spec): signature, version, flags, data offset.
typedef struct _FLVHead
{
char type[3];      // signature, always "FLV"
char version;      // file version (typically 1)
char stream_info;  // flags byte: bit0 = video present, bit2 = audio present
int offset;        // header size in bytes, big-endian on the wire
}FLVHead;
// 4-byte PreviousTagSize followed by the 11-byte FLV tag header (15 bytes total).
typedef struct _FLVTag
{
int tag_size;      // size of the PREVIOUS tag, big-endian
char type;         // 8 = audio, 9 = video, 18 = script data
char length[3];    // payload length, 24-bit big-endian
char timecamp[3];  // timestamp in ms, 24-bit big-endian ("timecamp" = timestamp)
char timecampex;   // timestamp extension: bits 31..24
char StreamsID[3]; // always 0 per the FLV spec
}FLVTag;
#pragma pack()
// Reads one unsigned byte from buf into *i8 and returns the advanced cursor.
// Fix: the original memcpy'd a single byte into an int, which left the upper
// three bytes of *i8 stale (visible at the L67-style reuse of a non-zero
// target) and made the result depend on host endianness.
char* ByteRead8(int *i8, char *buf)
{
    *i8 = (unsigned char)buf[0];
    return buf + 1;
}
// Reads a 16-bit big-endian value from buf into *i16 and returns the advanced
// cursor. Fix: the original memcpy + HTON16 only works on little-endian
// hosts; explicit shifts are endian-independent.
char* ByteRead16(int *i16, char *buf)
{
    *i16 = ((unsigned char)buf[0] << 8) | (unsigned char)buf[1];
    return buf + 2;
}
// Reads a 32-bit big-endian value from buf into *i32 and returns the advanced
// cursor. Fix: the original memcpy + HTON32 only works on little-endian
// hosts. The shift is done in unsigned arithmetic to avoid UB when byte 0
// has its high bit set (left-shifting into the sign bit of a signed int).
char* ByteRead32(int *i32, char *buf)
{
    unsigned int v = ((unsigned int)(unsigned char)buf[0] << 24)
                   | ((unsigned int)(unsigned char)buf[1] << 16)
                   | ((unsigned int)(unsigned char)buf[2] << 8)
                   |  (unsigned int)(unsigned char)buf[3];
    *i32 = (int)v;
    return buf + 4;
}
// Converts the payload of an FLV video tag (AVC format) into Annex-B H.264:
// every NALU in the output is preceded by the 4-byte start code 00 00 00 01.
//
// data/len : FLV VideoTagBody — 1 byte frame/codec info, 1 byte AVCPacketType,
//            3 bytes composition time, then either an
//            AVCDecoderConfigurationRecord (packet type 0: SPS/PPS) or one or
//            more [4-byte big-endian size][NALU] records (packet type 1).
// outData  : caller-supplied output buffer; must hold at least len + 4 bytes
//            per NALU (caller uses a 1 MB scratch buffer).
// outLen   : receives the number of bytes written to outData.
//
// Fixes over the original:
//  - start code emitted as an explicit byte sequence instead of memcpy'ing
//    the int 0x01000000 (which produced the wrong bytes on big-endian hosts);
//  - all multi-byte fields decoded with explicit shifts (endian-independent);
//  - every length is clamped against the remaining input, so a truncated or
//    malformed packet can no longer read past the end of `data`.
void UnPackH264(char *data, int len, char *outData, int &outLen)
{
    static const unsigned char startCode[4] = { 0x00, 0x00, 0x00, 0x01 };
    unsigned char *in  = (unsigned char *)data;
    unsigned char *end = in + len;
    unsigned char *out = (unsigned char *)outData;
    outLen = 0;
    if (len < 5)
        return;                       // too short to even hold the tag prefix
    int avctype = in[1];              // AVCPacketType: 0 = sequence header, 1 = NALUs
    in += 5;                          // skip frame info, packet type, composition time
    if (avctype == 0)
    {
        // AVCDecoderConfigurationRecord: pull out the first SPS and PPS.
        if (end - in < 8)
            return;
        in += 6;                      // version, profile, compat, level, lengthSize, numSPS
        int spsLen = (in[0] << 8) | in[1];
        in += 2;
        if (spsLen > end - in)
            return;                   // truncated record
        memcpy(out, startCode, 4); out += 4;
        memcpy(out, in, spsLen);   out += spsLen;
        in += spsLen;
        if (end - in >= 3)
        {
            in += 1;                  // numOfPictureParameterSets
            int ppsLen = (in[0] << 8) | in[1];
            in += 2;
            if (ppsLen > end - in)
                ppsLen = (int)(end - in);
            memcpy(out, startCode, 4); out += 4;
            memcpy(out, in, ppsLen);   out += ppsLen;
        }
    }
    else
    {
        // One or more [4-byte size][NALU] records until the input runs out.
        while (end - in >= 4)
        {
            int naluLen = (int)(((unsigned int)in[0] << 24) |
                                ((unsigned int)in[1] << 16) |
                                ((unsigned int)in[2] << 8)  |
                                 (unsigned int)in[3]);
            in += 4;
            if (naluLen <= 0)
                break;                // corrupt length; stop rather than spin
            if (naluLen > end - in)
                naluLen = (int)(end - in);   // clamp to what is actually present
            memcpy(out, startCode, 4); out += 4;
            memcpy(out, in, naluLen);  out += naluLen;
            in += naluLen;
        }
    }
    outLen = (int)(out - (unsigned char *)outData);
}
// Wraps one raw AAC frame from an FLV audio tag in a 7-byte ADTS header so a
// standalone AAC decoder can consume it.
//
// bufIn/len     : FLV AudioTagBody — 1 byte sound-format info, 1 byte
//                 AACPacketType, then the raw AAC frame.
// audioSamprate : sample rate in Hz; selects the ADTS sampling_frequency_index.
// pBufOut       : output buffer, receives ADTS header + raw frame.
// nBufSize      : capacity of pBufOut in bytes.
// pOutLen       : receives the total number of bytes written (0 on failure).
// Returns 1 on success, 0 when the input is too short or the output would
// not fit (the original returned 1 unconditionally; no caller checks it).
//
// Fixes over the original:
//  - the live path never checked nBufSize, so an oversized frame silently
//    overflowed pBufOut — the size is now validated before writing;
//  - inputs shorter than the 2-byte FLV audio prefix are rejected;
//  - the dead "#if 0" multi-AU branch was deleted.
int AAC_TO_ADTS(unsigned char *bufIn, int len, int audioSamprate, unsigned char *pBufOut, const int nBufSize, int *pOutLen)
{
    // Template ADTS header: syncword 0xFFF, MPEG-2 ID, no CRC.
    unsigned char ADTS[] = { 0xFF, 0xF9, 0x00, 0x00, 0x00, 0x00, 0xFC };
    const int header = 7;        // sizeof(ADTS)
    const int audioChannel = 2;  // channel_configuration: 1 = mono, 2 = stereo

    *pOutLen = 0;
    if (len < 2)
        return 0;                // no room for the FLV audio prefix

    // Byte 2 encodes profile + sampling_frequency_index (values kept
    // byte-for-byte from the original table).
    switch (audioSamprate)
    {
    case 8000:  ADTS[2] = 0x6C; break;
    case 11025: ADTS[2] = 0x68; break;
    case 12000: ADTS[2] = 0xE4; break;
    case 16000: ADTS[2] = 0x60; break;
    case 22050: ADTS[2] = 0xDC; break;
    case 32000: ADTS[2] = 0x54; break;
    case 44100: ADTS[2] = 0x50; break;
    case 48000: ADTS[2] = 0x4C; break;
    case 96000: ADTS[2] = 0x40; break;
    default:    break;           // unknown rate: leave index 0, as before
    }
    ADTS[3] = (audioChannel == 2) ? 0x80 : 0x40;

    // Strip the 2-byte FLV prefix; everything after it is the raw AAC frame.
    unsigned int framelen = (unsigned int)(len - 2);
    if ((int)(framelen + header) > nBufSize)
        return 0;                // fix: would overflow the caller's buffer

    // aac_frame_length = header + payload, spread across header bytes 3..5.
    ADTS[3] |= ((framelen + header) & 0x1800) >> 11;
    ADTS[4]  = (unsigned char)(((framelen + header) & 0x1FF8) >> 3);
    ADTS[5]  = (unsigned char)(((framelen + header) & 0x0007) << 5);
    ADTS[5] |= 0x1F;             // adts_buffer_fullness high bits (all-ones)

    memcpy(pBufOut, ADTS, sizeof(ADTS));
    memcpy(pBufOut + header, bufIn + 2, framelen);
    *pOutLen = (int)(framelen + header);
    return 1;
}
//---- module-level state ----
RTMP * rtmp;               // librtmp session handle (NULL when closed)
string m_InputUrl;         // RTMP URL to pull from
bool m_stop_status;        // set true to make readAndDecode() exit its loop
char* m_h264data;          // scratch buffer for the unpacked Annex-B stream
BYTE * m_aacADTSBuf;       // scratch buffer for one ADTS-wrapped AAC frame
bool m_bSpsPpsGot;         // true once SPS/PPS have been captured
BYTE m_SpsPpsBuffer[2048]; // cached SPS+PPS (Annex-B), inserted before I-frames
int m_nSpsPpsSize;         // valid byte count in m_SpsPpsBuffer
#define ONE_AUDIO_FRAME_SIZE 192000
// Allocates the scratch buffers and opens the RTMP connection to m_InputUrl.
// Returns TRUE on success. On any failure the RTMP handle is released AND
// `rtmp` is set back to NULL so a later UnInit() is safe.
// Fix: the original left `rtmp` dangling after a RTMP_SetupURL failure,
// causing a double free when UnInit() ran afterwards.
BOOL Init()
{
    m_h264data = new char[1024*1000];
    m_aacADTSBuf = new BYTE[ONE_AUDIO_FRAME_SIZE];
    m_bSpsPpsGot = false;
    m_nSpsPpsSize = 0;
    memset(m_SpsPpsBuffer, 0, sizeof(m_SpsPpsBuffer));
    m_stop_status = false;

    bool bLiveStream = true;  // pulling a live stream (no seeking)
    // To debug the librtmp handshake, enable:
    //RTMP_LogSetLevel(RTMP_LOGDEBUG);
    rtmp = RTMP_Alloc();
    RTMP_Init(rtmp);
    rtmp->Link.timeout = 10;  // connection timeout in seconds (default is 30)
    if (!RTMP_SetupURL(rtmp, (char*)m_InputUrl.c_str()))
    {
        TRACE("RTMP_SetupURL Err\n");
        RTMP_Free(rtmp);
        rtmp = NULL;          // fix: don't leave a dangling handle behind
        return FALSE;
    }
    if (bLiveStream)
    {
        rtmp->Link.lFlags |= RTMP_LF_LIVE;
    }
    RTMP_SetBufferMS(rtmp, 5*1000);  // advertise a 5 s client buffer
    if (!RTMP_Connect(rtmp, NULL))
    {
        TRACE("RTMP_Connect Err\n");
        RTMP_Free(rtmp);
        rtmp = NULL;
        return FALSE;
    }
    if (!RTMP_ConnectStream(rtmp, 0))
    {
        TRACE("ConnectStream Err\n");
        RTMP_Close(rtmp);
        RTMP_Free(rtmp);
        rtmp = NULL;
        return FALSE;
    }
    return TRUE;
}
// Releases the RTMP session and the scratch buffers allocated by Init().
// Safe to call repeatedly: every pointer is NULL'ed after release.
// Fix: the buffers were allocated with new[] but released with scalar
// delete — undefined behavior; use delete[]. (delete[] on NULL is a no-op,
// so the explicit null guards around the buffers were dropped.)
void UnInit()
{
    if (rtmp)
    {
        RTMP_Close(rtmp);
        RTMP_Free(rtmp);
        rtmp = NULL;
    }
    delete[] m_h264data;
    m_h264data = NULL;
    delete[] m_aacADTSBuf;
    m_aacADTSBuf = NULL;
}
// Receives the RTMP stream and parses it: RTMP_Read delivers an FLV byte
// stream, which this loop splits into tags. Video tags are converted to
// Annex-B H.264 (with cached SPS/PPS prepended to every I-frame) and audio
// tags are wrapped in ADTS headers. Runs until m_stop_status is set or a
// read fails; on exit the RTMP session is closed and freed.
void readAndDecode()
{
int nVideoFramesNum = 0;
int64_t first_pts_time = 0;
DWORD start_time = GetTickCount();
int64_t pts_time;
long countbufsize=0;
int nRead = 0;
FLVHead flvHead;
FLVTag flvTag;
memset(&flvHead,0,sizeof(flvHead));
// The first sizeof(FLVHead) == 9 bytes of the stream are the FLV file header.
nRead = RTMP_Read(rtmp,(char*)&flvHead,sizeof(FLVHead));
if(nRead <= 0)
{
TRACE("RTMP_Read failed \n");
return;
}
// 1 MB scratch buffer for one tag payload.
int bufsize = 1024*1024*1;
char *data =(char*)malloc(bufsize);
memset(data,0,bufsize);
flvHead.offset = HTON32(flvHead.offset);
// The TRACE format strings below are runtime text (Chinese labels:
// file type / version / stream info / header length) and are kept verbatim.
TRACE("文件類型: %c %c %c\n",flvHead.type[0],flvHead.type[1],flvHead.type[2]);
TRACE("版本: %d\n",flvHead.version);
TRACE("流信息: %d\n",flvHead.stream_info);
TRACE("head長度: %d\n",flvHead.offset);
AVCodecID audio_codec_id = AV_CODEC_ID_AAC;
memset(&flvTag,0,sizeof(FLVTag));
while(1)
{
if(m_stop_status == true)
{
break;
}
// Read one FLVTag record: 4-byte PreviousTagSize + 11-byte tag header
// (15 bytes total thanks to #pragma pack(1)). The very first iteration
// also consumes PreviousTagSize0 that follows the file header.
if( (nRead = RTMP_Read(rtmp,(char*)&flvTag,sizeof(FLVTag))) == sizeof(FLVTag))
{
flvTag.tag_size = HTON32(flvTag.tag_size);
int length = 0;
memcpy(&length,flvTag.length,3);
length = HTON24(length);
unsigned int time = 0;
memcpy(&time,flvTag.timecamp,3);
time = HTONTIME(time);
//TRACE("\nTag大小: %d\n",flvTag.tag_size);
//TRACE("Tag類型: %d, 長度:%d, 時間戳: %d\n",flvTag.type, length, time);
pts_time = time*90; // convert the ms timestamp to 90 kHz units
// NOTE(review): `length` can be up to 0xFFFFFF while `data` holds 1 MB;
// a larger tag would overflow the buffer — confirm the source keeps
// tag payloads below 1 MB.
nRead = RTMP_Read(rtmp,data,length);
if(nRead > 0)
{
if(flvTag.type == 8) // audio tag
{
if(audio_codec_id == AV_CODEC_ID_AAC)
{
int nOutAACLen = 0;
// Wrap the raw AAC frame in an ADTS header.
// NOTE(review): sample rate is hard-coded to 44100 — should come
// from the AAC sequence header / metadata; verify for other sources.
AAC_TO_ADTS((unsigned char*)data, nRead, 44100, m_aacADTSBuf, ONE_AUDIO_FRAME_SIZE, &nOutAACLen);
//DecodeAudio(m_aacADTSBuf, nOutAACLen, AV_CODEC_ID_AAC, TRUE);
}
else
{
//DecodeAudio(data, nRead, AV_CODEC_ID_AAC, TRUE);
}
}
else if(flvTag.type == 9) // video tag
{
nVideoFramesNum++;
if(!(data[0] == 0x0 && data[1] == 0x0 && data[2] == 0x0 && data[3] == 0x01))
{
// Payload is AVC (length-prefixed NALUs), not Annex-B: convert it.
int outLen = 0;
UnPackH264(data, nRead, m_h264data, outLen); // each NALU now starts with a 4-byte start code (0x00000001)
if(outLen < 4)
{
TRACE("Video frame was too short! \n");
continue;
}
int nalu_type = (m_h264data[4] & 0x1F);
TRACE("FrameNo: %d, nalu_type: %d, size: %d \n", nVideoFramesNum, nalu_type, outLen);
if(!m_bSpsPpsGot && nalu_type == 7) // type 7 = SPS: cache SPS+PPS once
{
ASSERT(outLen < sizeof(m_SpsPpsBuffer));
memcpy(m_SpsPpsBuffer,m_h264data, outLen);
m_nSpsPpsSize = outLen;
m_bSpsPpsGot = true;
}
if(nalu_type == 5) // type 5 = IDR (I) frame
{
if(m_bSpsPpsGot) // prepend the cached SPS and PPS before the I-frame
{
memmove(m_h264data + m_nSpsPpsSize, m_h264data, outLen);
memcpy(m_h264data, m_SpsPpsBuffer, m_nSpsPpsSize);
outLen += m_nSpsPpsSize;
}
}
if(outLen > 0)
{
//DecodeVideo((BYTE*)m_h264data, outLen, pts_time);
}
}
else
{
// Already Annex-B: pass the payload through unchanged.
//DecodeVideo((BYTE*)data, nRead, pts_time);
}
}
}
}
if(nRead < 0)
{
TRACE("RTMP_Read return: %d \n", nRead);
break;
}
else if(nRead == 0)
{
Sleep(10); // nothing buffered yet; back off briefly before retrying
continue;
}
}//while
free(data);
if(rtmp != NULL)
{
RTMP_Close(rtmp);
RTMP_Free(rtmp);
rtmp = NULL;
}
}
注意上面的代碼會自動插入SPS和PPS到每個I幀前面,你最好先考慮一下你的代碼是否需要這個功能。只要視頻開頭有一個I幀攜帶了SPS+PPS,視頻也是能播放的。如果不需要每個I幀前面都插入SPS+PPS(第一個I幀除外),可以屏蔽掉那些代碼。
如想了解更多librtmp使用上的問題,可參考我的一篇博文:使用librtmp接收要注意的問題