Audio and Video Learning (10): Revisiting RTMP Pull Streaming

Yesterday I covered push streaming, and I am not sure how many readers could follow it. Reading the explanation without the code in front of you is admittedly hard, but since this is not code I wrote myself, I will stick to analysis rather than pasting everything verbatim.

10.1 Revisiting RTMP Pull Streaming

Compared with push streaming, which also needs a video capture module, pull streaming is a little simpler: there are only two modules, RtmpPlayer and videoDecode. RtmpPlayer is responsible for pulling the stream, and videoDecode is responsible for decoding it.

10.2 RtmpPlayer Analysis

RtmpPlayer starts a dedicated thread internally. This thread does nothing but pull the video stream: it checks whether an RTMP packet has been fully received, and if so, takes the data out and analyzes it.

void* RtmpPlayer::readPacketThread()
{
    RTMPPacket packet = {0};

    while(!_exit_thread)
    {
        // reconnect if the connection has dropped
        if(!isConnect())
        {
            printf("connection lost, reconnecting\n");
            if(!connect(_url))      // reconnect failed
            {
                printf("reconnect failed %s\n",_url.c_str());
                msleep(10);
                continue;
            }
        }


        RTMP_ReadPacket(_rtmp, &packet);		// read one packet (chunk) from the RTMP connection

        if(RTMPPacket_IsReady(&packet))    // has the whole packet been assembled from its chunks?
        {
            uint8_t nalu_header[4] = { 0x00, 0x00, 0x00, 0x01 };

            if(!packet.m_nBodySize)
                continue;

            if(packet.m_packetType == RTMP_PACKET_TYPE_VIDEO)
            {
                // This is video data. The body holds one of two things: an AVC sequence header
                // (sps/pps) or NALU data, and the two branches below handle them differently.

                // Parse the data before handing it to the decoder:
                // check the leading bytes to see whether this is the codec config, recover sps/pps,
                // and reassemble the frame
                bool keyframe = 0x17 == packet.m_body[0] ? true : false;
                // AVC NALU : 0x01 / AVC sequence header : 0x00
                bool sequence = 0x00 == packet.m_body[1];

                //SPS/PPS sequence
                if(sequence)
                {
                    // ... sequence header handling, expanded in 10.2.2 below
                }
                // Nalu frames
                else
                {
                    // ... NALU handling, expanded in 10.2.2 below
                }
            }
            else if(packet.m_packetType == RTMP_PACKET_TYPE_AUDIO)
            {
                printf("rtmp audio\n");
                // audio is not handled yet; it will be added later
            }
            else if(packet.m_packetType == RTMP_PACKET_TYPE_INFO)
            // as soon as we start, the server sends us a couple of info packets
            {
                printf("rtmp info\n");
                // this is the stream info, i.e. the FLV metadata; it mainly carries the video configuration

                // parse the script tag
                parse_script_tag(packet);
                if(video_width > 0 && video_height>0)
                {
                    FLVMetadataMsg *metadata = new FLVMetadataMsg();
                    metadata->width = video_width;//720;
                    metadata->height = video_height;//480;
                    video_callable_object_(RTMP_BODY_METADATA, metadata, false);
                }
            }
            else
            {
                printf("rtmp else\n");
                RTMP_ClientPacket(_rtmp, &packet);
            }
        }

        RTMPPacket_Free(&packet);

        memset(&packet,0,sizeof(RTMPPacket));
    }
    printf("thread exit\n");
    return NULL;

}
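
The connect(_url) and isConnect() calls used in the reconnect logic above are not shown in the article. With librtmp, a pull-side connect boils down to the standard setup sequence; the following is a rough sketch assuming the member names used above (the buffer size and timeout values are illustrative, not the project's actual values):

#include <librtmp/rtmp.h>

// Sketch of what connect() typically does for a pull (play) session with librtmp.
bool RtmpPlayer::connect(const std::string &url)
{
    if (_rtmp) {                                       // drop any previous session first
        RTMP_Close(_rtmp);
        RTMP_Free(_rtmp);
        _rtmp = NULL;
    }

    _rtmp = RTMP_Alloc();
    RTMP_Init(_rtmp);
    _rtmp->Link.timeout = 10;                          // socket timeout in seconds

    if (!RTMP_SetupURL(_rtmp, (char *)url.c_str()))    // parse the rtmp:// URL
        return false;

    // For pulling we do NOT call RTMP_EnableWrite(); that is only needed when pushing.
    RTMP_SetBufferMS(_rtmp, 3600 * 1000);              // advertise a large client buffer

    if (!RTMP_Connect(_rtmp, NULL))                    // TCP connect + RTMP handshake
        return false;

    if (!RTMP_ConnectStream(_rtmp, 0))                 // issues "play" and opens the stream
        return false;

    return true;
}

bool RtmpPlayer::isConnect()
{
    return _rtmp && RTMP_IsConnected(_rtmp);
}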

10.2.1 Parsing the metadata

// parse the script (onMetaData) tag
void RtmpPlayer::parse_script_tag(RTMPPacket &packet)
{
    AMFObject obj;
    AVal val;
    AMFObjectProperty * property;
    AMFObject subObject;
	// use the AMF module from librtmp to decode the metadata
    if(AMF_Decode(&obj, packet.m_body, packet.m_nBodySize, FALSE) < 0 )
    {
        printf("error AMF Decode\n");
        return;     // nothing usable was decoded
    }

    AMF_Dump(&obj);
    printf(" amf obj %d\n",obj.o_num);
	// Two nested loops are needed because the metadata is packed as nested AMF objects
    for(int n=0; n<obj.o_num; n++)
    {
        property = AMF_GetProp(&obj, NULL, n);  // extract the child property from the top-level object
        if(property != NULL)
        {
           if(property->p_type == AMF_OBJECT)	// if the child is itself an object, extract its members too
           {
                AMFProp_GetObject(property, &subObject);      // get the child object
                for(int m = 0; m < subObject.o_num; m++)
                {
                    property = AMF_GetProp(&subObject, NULL, m);	// second-level extraction

                    if(property != NULL)
                    {
                        printf("val = %s\n",property->p_name.av_val);
                        if (property->p_type == AMF_OBJECT)		// there should be no deeper nesting
                        {

                        }
                        else if ( property->p_type == AMF_BOOLEAN )  // boolean value
                        {
                            int bval = AMFProp_GetBoolean(property);
                            if(strncasecmp("stereo", property->p_name.av_val, property->p_name.av_len) == 0)
                            {
                                audio_channel = bval > 0 ? 2 : 1;
                                printf("parse channel %d\n", audio_channel);
                            }
                        }
                        else if (property->p_type == AMF_NUMBER)	// numeric values: this is where most fields live
                       {
                       	// including the video width, height, bitrate, frame rate, and so on
                           double dVal = AMFProp_GetNumber(property);
                           if (strncasecmp("width", property->p_name.av_val, property->p_name.av_len) == 0)
                           {
                               video_width = (int)dVal;
                               printf("parse width %d\n",video_width);
                           }
                           else if (strcasecmp("height", property->p_name.av_val) == 0)
                           {
                               video_height = (int)dVal;
                               printf("parse Height %d\n",video_height);
                           }
                           else if (strcasecmp("framerate", property->p_name.av_val) == 0)
                           {
                               video_frame_rate = (int)dVal;
                               printf("parse frame_rate %d\n",video_frame_rate);
                           }
                           else if (strcasecmp("videocodecid", property->p_name.av_val) == 0)
                           {
                               video_codec_id = (int)dVal;
                               printf("parse video_codec_id %d\n",video_codec_id);
                           }
                           else if (strcasecmp("audiosamplerate", property->p_name.av_val) == 0)
                           {
                               audio_sample_rate = (int)dVal;
                               printf("parse audiosamplerate %d\n",audio_sample_rate);
                           }
                           else if (strcasecmp("audiodatarate", property->p_name.av_val) == 0)
                           {
                               audio_bit_rate = (int)dVal;
                               printf("parse audiodatarate %d\n",audio_bit_rate);
                           }
                           else if (strcasecmp("audiosamplesize", property->p_name.av_val) == 0)
                           {
                               audio_sample_size = (int)dVal;
                               printf("parse audiosamplesize %d\n",audio_sample_size);
                           }
                           else if (strcasecmp("audiocodecid", property->p_name.av_val) == 0)
                           {
                               audio_codec_id = (int)dVal;
                               printf("parse audiocodecid %d\n",audio_codec_id);
                           }
                           else if (strcasecmp("filesize", property->p_name.av_val) == 0)
                           {
                               file_size = (int)dVal;
                               printf("parse filesize %d\n",file_size);
                           }
                       }
                       else if (property->p_type == AMF_STRING)
                       {
                           AMFProp_GetString(property, &val);
                       }
                    }
                }
           }
           else
           {
               AMFProp_GetString(property, &val);

               printf("val = %s\n",val.av_val);
           }

        }
    }
}

With that, the metadata can be parsed completely. If you want to dig deeper you can study the library source; I will not analyze it further here and will simply use it as-is for now.
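
For reference, the onMetaData script tag that parse_script_tag walks through typically carries fields like the following (exact contents depend on the pusher; the values below are only examples):

// Typical onMetaData contents (AMF ECMA array / object) produced by common pushers:
//   width           : 720        video width in pixels
//   height          : 480        video height in pixels
//   framerate       : 25         frames per second
//   videocodecid    : 7          7 = AVC/H.264 in the FLV spec
//   audiosamplerate : 48000      Hz
//   audiodatarate   : 128        kbps
//   audiosamplesize : 16         bits per sample
//   audiocodecid    : 10         10 = AAC in the FLV spec
//   stereo          : true       maps to audio_channel = 2 in the code above
//   filesize        : 0          usually 0 for live streams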

10.2.2 Parsing the video data

//printf("rtmp video\n");

// Parse the data before handing it to the decoder:
// check the leading bytes, detect whether this is the codec config, and recover sps/pps
// reassemble the frame
bool keyframe = 0x17 == packet.m_body[0] ? true : false;
bool sequence = 0x00 == packet.m_body[1];         // AVC NALU : 0x01/ AVC sequence header : 0x00

//SPS/PPS sequence: as yesterday's analysis showed, a second byte of 0x00 marks the AVC sequence header (sps/pps)
if(sequence)
{
    uint32_t offset = 10;       // offset = 10 lands on the SPS count inside the AVCDecoderConfigurationRecord
    // in yesterday's push code this is exactly where the SPS count was written: body[i++] = 0xE1;   // & 0x1f
    uint32_t sps_num = packet.m_body[offset++] & 0x1f;    // the lower 5 bits hold the actual number of SPS
    if(sps_num > 0)
    {
        _sps_vector.clear();    // clear any previously cached SPS first
    }
    for (int i = 0; i < sps_num; i++)				// save each SPS
    {
        uint8_t len0 = packet.m_body[offset];
        uint8_t len1 = packet.m_body[offset + 1];
        uint32_t sps_len = ((len0 << 8) | len1);
        offset += 2;
        // Write sps data
        std::string sps;
        sps.append(nalu_header, nalu_header + 4); // prepend the 00 00 00 01 start code
        sps.append(packet.m_body + offset, packet.m_body + offset + sps_len);
        _sps_vector.push_back(sps);
        offset += sps_len;                  //sps data
    }

	// after the SPS comes the PPS data, stored the same way
    uint32_t pps_num = packet.m_body[offset++] & 0x1f;
    if(pps_num > 0)
    {
        _pps_vector.clear();    // clear any previously cached PPS first
    }
    for (int i = 0; i < pps_num; i++)
    {
        uint8_t len0 = packet.m_body[offset];
        uint8_t len1 = packet.m_body[offset + 1];
        uint32_t pps_len = ((len0 << 8) | len1);
        offset += 2;
        // Write pps data
        std::string pps;
        pps.append(nalu_header, nalu_header + 4); // prepend the 00 00 00 01 start code
        pps.append(packet.m_body + offset, packet.m_body + offset + pps_len);
        _pps_vector.push_back(pps);
        offset += pps_len;
    }
    VideoSequenceHeaderMsg * vid_config_msg = new VideoSequenceHeaderMsg(
                                (uint8_t *)_sps_vector[0].c_str(),
                                _sps_vector[0].size(),
                                (uint8_t *)_pps_vector[0].c_str(),
                                _pps_vector[0].size()
                                );
    // invoke the callback with the sps/pps configuration
    video_callable_object_(RTMP_BODY_VID_CONFIG, vid_config_msg, false);
}
// Nalu frames
else
{
    // NALU data is simpler: skip straight to the length field and read 4 bytes holding the NALU length
    uint32_t offset = 5;
    uint8_t len0 = packet.m_body[offset];
    uint8_t len1 = packet.m_body[offset + 1];
    uint8_t len2 = packet.m_body[offset + 2];
    uint8_t len3 = packet.m_body[offset + 3];
    uint32_t data_len = ((len0 << 24) | (len1 << 16) | (len2 << 8) | len3);
    offset += 4;

    NaluStruct * nalu = new NaluStruct(data_len + 4);
    memcpy(nalu->data, nalu_header, 4);
    memcpy(nalu->data + 4, packet.m_body + offset, data_len);
    if(_video_pre_pts == -1){
        _video_pre_pts= packet.m_nTimeStamp;
        if(!packet.m_hasAbsTimestamp) {
            printf("no init video pts\n");
        }
    }
    else {
        if(packet.m_hasAbsTimestamp)
            _video_pre_pts= packet.m_nTimeStamp;
        else
            _video_pre_pts += packet.m_nTimeStamp;
    }
    nalu->pts = _video_pre_pts;
    video_callable_object_(RTMP_BODY_VID_RAW, nalu, false);
    offset += data_len;
}
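
To see where the magic offsets 10 and 5 come from, this is the byte layout of the FLV video tag body that the two branches above walk through (offsets are relative to packet.m_body[0]):

// AVC sequence header (sequence == true):
//   body[0]      0x17                frame type (1 = keyframe) + codec id (7 = AVC)
//   body[1]      0x00                AVCPacketType: 0 = sequence header
//   body[2..4]   0x00 0x00 0x00      composition time offset
//   body[5]      configurationVersion
//   body[6]      AVCProfileIndication
//   body[7]      profile_compatibility
//   body[8]      AVCLevelIndication
//   body[9]      0xFF                lengthSizeMinusOne in the lower 2 bits (usually 3 -> 4-byte NALU lengths)
//   body[10]     0xE1                numOfSequenceParameterSets in the lower 5 bits   <- offset = 10, & 0x1f
//   body[11..12] SPS length (big endian), followed by the SPS bytes
//   ...          numOfPictureParameterSets, then PPS length + PPS bytes
//
// AVC NALU packet (sequence == false):
//   body[0]      0x17 or 0x27        keyframe / inter frame, codec id 7
//   body[1]      0x01                AVCPacketType: 1 = NALU
//   body[2..4]   composition time offset
//   body[5..8]   NALU length (4 bytes, big endian)                                     <- offset = 5
//   body[9..]    NALU payload (no start code; we prepend 00 00 00 01 ourselves)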

Notice that after every piece of data is extracted, a video_callable function is invoked; it was bound at initialization time. Let's analyze that callback next.

10.2.3 Video callback analysis

This time the callback is simple: it takes the data RtmpPlayer just parsed and posts it straight into a message queue, roughly as sketched below.
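
A minimal sketch of what that binding and callback might look like (AddVideoInfoCallback and Post are made-up names for illustration; the project's actual API may differ):

// Hypothetical binding done at init time: the player's video callback simply
// posts each parsed message (metadata / sps-pps config / nalu) into the
// decode loop's queue, so pulling and decoding run on separate threads.
rtmp_player->AddVideoInfoCallback(
    [decode_loop](int what, MsgBaseObj *data, bool flush)
    {
        (void)flush;                    // the flush flag is ignored in this sketch
        decode_loop->Post(what, data);  // hand the message over to videoDecodeLoop
    });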

10.3 videoDecodeLoop Analysis

The videoDecodeLoop module handles video decoding. During initialization it creates an h264decoder and a Looper message queue. The Looper has its own internal thread whose main job is to check whether the queue holds any data; when it does, the data is taken out and handed to the handler callback, namely videoDecodeLoop::handle. A self-contained sketch of such a Looper follows.
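
The article does not show the Looper itself, so the following is a minimal self-contained sketch of the idea: a blocking queue plus a worker thread that hands each message to a handler (the real project's Looper will differ in details):

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>
#include <utility>

struct MsgBaseObj;   // payload base class from the project (assumed)

class Looper {
public:
    using Handler = std::function<void(int what, MsgBaseObj *data)>;

    explicit Looper(Handler handler)
        : _handler(std::move(handler)), _worker(&Looper::loop, this) {}

    ~Looper() {
        { std::lock_guard<std::mutex> lk(_mtx); _exit = true; }
        _cv.notify_all();
        _worker.join();
    }

    // Called by the RtmpPlayer callback thread to queue a message.
    void Post(int what, MsgBaseObj *data) {
        { std::lock_guard<std::mutex> lk(_mtx); _queue.push_back({what, data}); }
        _cv.notify_one();
    }

private:
    struct Msg { int what; MsgBaseObj *data; };

    // Worker thread: wait until the queue has data, then hand it to the handler
    // (in this article that handler is videoDecodeLoop::handle).
    void loop() {
        for (;;) {
            std::unique_lock<std::mutex> lk(_mtx);
            _cv.wait(lk, [this] { return _exit || !_queue.empty(); });
            if (_exit && _queue.empty()) return;
            Msg msg = _queue.front();
            _queue.pop_front();
            lk.unlock();
            _handler(msg.what, msg.data);
        }
    }

    Handler _handler;
    std::mutex _mtx;
    std::condition_variable _cv;
    std::deque<Msg> _queue;
    bool _exit = false;
    std::thread _worker;   // started last, after all other members are ready
};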

10.3.1 videoDecodeLoop::handle Analysis

This function is called with the data taken from the queue:

void videoDecodeLoop::handle(int what, MsgBaseObj *data)
{
    if(what == RTMP_BODY_METADATA)				// metadata received: initialize the display output
    {
        if(!_video_out_sdl)
        {
            _video_out_sdl = new VideoOutSDL();
            if(!_video_out_sdl)
            {
                printf("new VideoOutSDL() failed\n");
                return;
            }
            Properties vid_out_properties;
            FLVMetadataMsg *metadata = (FLVMetadataMsg *)data;
            vid_out_properties.SetProperty("video_width", metadata->width);
            vid_out_properties.SetProperty("video_height",  metadata->height);
            vid_out_properties.SetProperty("win_x", 1000);
            vid_out_properties.SetProperty("win_title", "pull video display");
            delete metadata;
            if(_video_out_sdl->init(vid_out_properties) != 0)
            {
                printf("video_out_sdl Init failed\n");
                return;
            }
        }
    }
    else if(what == RTMP_BODY_VID_CONFIG)			// feed the received sps/pps to the decoder
    {
        VideoSequenceHeaderMsg *vid_config = (VideoSequenceHeaderMsg *)data;

        // send the sps to the decoder
        _yuv_buf_size = YUV_BUF_MAX_SIZE;
        _h264decoder->decode(vid_config->_sps, vid_config->_sps_size,
                                        _yuv_buf, _yuv_buf_size);
        // send the pps to the decoder
        _yuv_buf_size = YUV_BUF_MAX_SIZE;
        _h264decoder->decode(vid_config->_pps, vid_config->_pps_size,
                                        _yuv_buf, _yuv_buf_size);
        delete vid_config;
    }
    else							// otherwise it is a nalu: send it to the decoder
    {
        NaluStruct *nalu = (NaluStruct *)data;
        _yuv_buf_size = YUV_BUF_MAX_SIZE;
        if(_h264decoder->decode(nalu->data, nalu->size,
                                        _yuv_buf, _yuv_buf_size) == 0)
        {
            callable_object_(_yuv_buf, _yuv_buf_size);
        }
        delete nalu;     // the message must be freed manually
    }
}
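
As a side note, the VideoOutSDL object created for the metadata message only needs an SDL window plus a streaming YUV texture. The article does not show its implementation, so the struct below is an illustrative SDL2 sketch of the idea, not the project's actual class:

#include <SDL2/SDL.h>
#include <cstdint>

// Illustrative YUV420P display helper; VideoOutSDL presumably wraps similar calls.
struct YuvDisplay {
    SDL_Window   *win = nullptr;
    SDL_Renderer *renderer = nullptr;
    SDL_Texture  *texture = nullptr;
    int width = 0, height = 0;

    int init(int w, int h) {
        width = w; height = h;
        if (SDL_Init(SDL_INIT_VIDEO) != 0) return -1;
        win = SDL_CreateWindow("pull video display", SDL_WINDOWPOS_CENTERED,
                               SDL_WINDOWPOS_CENTERED, w, h, SDL_WINDOW_SHOWN);
        renderer = SDL_CreateRenderer(win, -1, 0);
        // IYUV == planar YUV 4:2:0, which matches the tightly packed buffer we copy out of the decoder
        texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV,
                                    SDL_TEXTUREACCESS_STREAMING, w, h);
        return (win && renderer && texture) ? 0 : -1;
    }

    // yuv points to a tightly packed Y plane followed by the U and V planes
    void display(const uint8_t *yuv) {
        SDL_UpdateTexture(texture, nullptr, yuv, width);   // pitch = width (no padding)
        SDL_RenderClear(renderer);
        SDL_RenderCopy(renderer, texture, nullptr, nullptr);
        SDL_RenderPresent(renderer);
    }
};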

All of this is fairly simple; the interesting part is the H.264 decoder itself.

10.3.2 h264decoder Analysis

The h264decoder initialization is much the same as the demo decoding code (a minimal init sketch is included after the function below), so the main thing to look at is the decode function:

int h264decoder::decode(uint8_t *in, int32_t in_len, uint8_t *out, int32_t &out_len)
{
   int got_picture=0;

   AVPacket pkt;
   av_init_packet(&pkt);
   pkt.data = in;
   pkt.size = in_len;
   
   int readed = avcodec_decode_video2(_ctx, _frame, &got_picture, &pkt);   // got_picture is set once a full frame comes out

   // a decoded picture is available
   if (got_picture && readed>0)
   {
       if(_ctx->width==0 || _ctx->height==0)
       {
           printf("-Wrong dimensions [%d,%d]\n", _ctx->width, _ctx->height);
           return 0;
       }
       int width = _frame->width;       //720
       int height = _frame->height;     //480

       out_len = _frame->width * _frame->height * 1.5;    // output size: H.264 in, YUV420P out (1.5 bytes per pixel)
       /*memcpy(out, _frame->data[0], _frame->width * _frame->height);        //Y
       memcpy(out + _frame->width * _frame->height, _frame->data[1],        //U
               (_frame->width * _frame->height) /4);
       memcpy(out + (_frame->width * _ctx->height) + (_frame->width * _ctx->height) /4,
              _frame->data[2],
               (_frame->width * _frame->height) /4);   */                   //V

	// The decoded planes are padded for alignment (linesize >= width), so we must copy
	// row by row; the plain memcpy version commented out above would not work.
       for(int j=0; j<height; j++)
       {
           memcpy(out + j*width, _frame->data[0] + j * _frame->linesize[0], width);
           // e.g. linesize[0] may be 768 for a 720-wide frame because of 64-byte alignment
       }
       out += width * height;          // advance past the Y plane
       for(int j=0; j<height/2; j++)
       {
           memcpy(out + j*width/2, _frame->data[1] + j * _frame->linesize[1], width/2);
       }
       out += width * height/2/2;      // advance past the U plane (width/2 * height/2 bytes)
       for(int j=0; j<height/2; j++)
       {
           memcpy(out + j*width/2, _frame->data[2] + j * _frame->linesize[2], width/2);
       }

		// The block below dumps the frame to a .yuv file, again copying row by row to strip the alignment padding
       static FILE *dump_yuv = NULL;
       if(!dump_yuv)
       {
           dump_yuv = fopen("h264_dump_320x240.yuv", "wb");
           if(!dump_yuv)
           {
               printf("fopen h264_dump.yuv failed");
           }
       }
       if(dump_yuv)
       {
           // play the dump with e.g.: ffplay -f rawvideo -pixel_format yuv420p -video_size <width>x<height> h264_dump_320x240.yuv
           // analysis of how AVFrame stores (padded) YUV420P:
           //https://blog.csdn.net/dancing_night/article/details/80830920?depth_1-utm_source=distribute.pc_relevant.none-task&utm_source=distribute.pc_relevant.none-task
           for(int j=0; j<height; j++)
               fwrite(_frame->data[0] + j * _frame->linesize[0], 1, width, dump_yuv);
           for(int j=0; j<height/2; j++)
               fwrite(_frame->data[1] + j * _frame->linesize[1], 1, width/2, dump_yuv);
           for(int j=0; j<height/2; j++)
               fwrite(_frame->data[2] + j * _frame->linesize[2], 1, width/2, dump_yuv);

           fflush(dump_yuv);
       }
       return 0;
   }
   out_len = 0;     // no frame produced this call (the decoder may still be buffering)
   return 0;
}
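
The article only mentions that the decoder initialization mirrors the demo code, so here is a minimal sketch of what such an init might look like, using the same legacy FFmpeg API generation as avcodec_decode_video2 above (the member names _ctx and _frame match the ones used in decode(); the init() signature itself is assumed):

extern "C" {
#include <libavcodec/avcodec.h>
}

// Minimal H.264 decoder initialization sketch.
int h264decoder::init()
{
    avcodec_register_all();                                  // required on older FFmpeg versions

    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264); // find the H.264 decoder
    if (!codec)
        return -1;

    _ctx = avcodec_alloc_context3(codec);                    // allocate the codec context
    if (!_ctx)
        return -1;

    if (avcodec_open2(_ctx, codec, NULL) < 0)                // open the decoder
        return -1;

    _frame = av_frame_alloc();                               // frame that receives decoded pictures
    if (!_frame)
        return -1;

    return 0;
}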

That is it for the reworked push and pull streaming. The next article will add the audio data; for now, to keep things simple, only video is handled.
