利用FFmpeg對火眼一體攝像機的回調數據進行處理:YUV轉H264,H264封裝flv,所有輸入都是在內存中。

整個工程代碼下載地址

http://download.csdn.net/download/gongluck93/10175326

Code

//#define WIN32_LEAN_AND_MEAN// 從 Windows 頭中排除極少使用的資料

#include <WinSock2.h>
#include <Windows.h>
#include <stdio.h>
#include <queue>
#include "VzLPRClientSDK.h"

using namespace std;

// ffmpeg
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif

#ifdef __cplusplus  
extern "C" 
{  
#endif  
#include "libavcodec\avcodec.h"
#include "libavformat\avformat.h"
#include "libswscale\swscale.h"
#include "libavutil\error.h"
#include "libswresample\swresample.h"
#ifdef __cplusplus  
}  
#endif  

#pragma comment( lib, "libgcc.a")  
#pragma comment( lib, "libmingwex.a")  
#pragma comment( lib, "libcoldname.a")  
#pragma comment( lib, "libavcodec.a")  
#pragma comment( lib, "libavformat.a")  
#pragma comment( lib, "libavutil.a")  
#pragma comment( lib, "libswscale.a")  
#pragma comment( lib, "libz.a")  
#pragma comment( lib, "libfaac.a")  
#pragma comment( lib, "libgsm.a")  
#pragma comment( lib, "libmp3lame.a")  
#pragma comment( lib, "libogg.a")  
#pragma comment( lib, "libspeex.a")  
#pragma comment( lib, "libtheora.a")  
#pragma comment( lib, "libvorbis.a")  
#pragma comment( lib, "libvorbisenc.a")  
#pragma comment( lib, "libx264.a")  
#pragma comment( lib, "xvidcore.a") 
#pragma comment( lib, "libswresample.a")

#pragma comment(lib, "VzLPRSDK.lib")
#pragma comment(lib, "WS2_32.lib")

#define SAFEFREE(ptr, func) if(ptr != NULL) { func(ptr); ptr = NULL; }
#define STOPNUM 150
#define IO_BUFFER_SIZE (32768)

typedef struct st_package
{
    unsigned char* buf;
    unsigned int len;
    unsigned int pre;//記錄已經被讀取的數據長度
}package;
queue<package*> g_que_pkg;

static FILE* g_fp_out = fopen("test.h264", "wb+");
static FILE* g_fp_H264 = fopen("save.h264", "wb+");
static FILE* g_fp_yuv = fopen("save.yuv", "wb+");

static AVPacket g_packet;
static AVFrame* g_pFra = avcodec_alloc_frame();

static AVCodec* g_pCodec;
static AVCodecContext* g_pCodecCtx;

static int g_frame_index = 0;
static bool g_stopCB1 = false;
static bool g_stopCB2 = false;

int read_buf(void *opaque, uint8_t *buf, int buf_size)
{  
    static package* pkg = NULL;
    unsigned int len = 0;

    //故意不加鎖的
    if(g_frame_index > STOPNUM)
        return -1;
    if(pkg == NULL)
    {
AGAIN:
        if(g_que_pkg.size() == 0)
            goto AGAIN;
        pkg = g_que_pkg.front();
        g_que_pkg.pop();
    }
    len = pkg->len-pkg->pre > buf_size ? buf_size : pkg->len-pkg->pre;
    memcpy(buf, pkg->buf+pkg->pre, len);
    pkg->pre += len;
    if(pkg->pre >= pkg->len)
    {
        free(pkg->buf);
        free(pkg);
        pkg = NULL;
    }

    return len;
}  

void __stdcall GetFrameCB(VzLPRClientHandle handle, void *pUserData, const VzYUV420P *pFrame)
{
    static int i = 0;
    if(g_stopCB1)
        return;

    //YUV420P ---->> h264
    //  方法1:算出一幀數據大小,並用avpicture_fill把buf和frame綁定一起,frame中的linesize也會自動設置
    //  static int picture_size = avpicture_get_size(g_pCodecCtx->pix_fmt, g_pCodecCtx->width, g_pCodecCtx->height);
    //  static uint8_t* picture_buf = (uint8_t *)malloc(picture_size);
    //  g_p_buf = picture_buf;
    //  avpicture_fill((AVPicture *)g_pFra, picture_buf, g_pCodecCtx->pix_fmt, g_pCodecCtx->width, g_pCodecCtx->height);
    //把數據拷貝到frame綁定的內存中(YYYYUV-YYYYUV)
    //  memcpy(g_pFra->data[0], pFrame->pY, pFrame->widthStepY * pFrame->height);
    //  memcpy(g_pFra->data[1], pFrame->pU, pFrame->widthStepU * pFrame->height/2);
    //  memcpy(g_pFra->data[2], pFrame->pV, pFrame->widthStepV * pFrame->height/2);
    //  方法2:自己填寫每個分道的行數據大小
    g_pFra->linesize[0] = g_pCodecCtx->width;
    g_pFra->linesize[1] = g_pCodecCtx->width/2;
    g_pFra->linesize[2] = g_pCodecCtx->width/2;
    //不再自行分配存儲YUV數據的內存空間,直接用回調函數已經分配的
    g_pFra->data[0] = pFrame->pY;
    g_pFra->data[1] = pFrame->pU;
    g_pFra->data[2] = pFrame->pV;
    ////////////////////////////////////
    g_pFra->pts = i++;
    av_init_packet(&g_packet);
    g_packet.data = NULL;
    g_packet.size = 0;
    int got_output;
    int ret = avcodec_encode_video2(g_pCodecCtx, &g_packet, g_pFra, &got_output);
    if (ret < 0)
    {
        printf("function _encode_video2 error !\n");
        return;
    }
    if (got_output)
    {
        printf("Succeed to encode frame: %5d\tsize:%5d\n",i,g_packet.size);
        fwrite(g_packet.data, 1, g_packet.size, g_fp_out);

        fwrite(pFrame->pY, pFrame->widthStepY*pFrame->height, 1, g_fp_yuv);
        fwrite(pFrame->pU, pFrame->widthStepU*pFrame->height/2, 1, g_fp_yuv);
        fwrite(pFrame->pV, pFrame->widthStepV*pFrame->height/2, 1, g_fp_yuv);
    }
    av_free_packet(&g_packet);
    if(g_frame_index >= STOPNUM)
        g_stopCB1 = true;
}

void __stdcall GetRealDataCB(VzLPRClientHandle handle, void *pUserData, VZ_LPRC_DATA_TYPE eDataType, const VZ_LPRC_DATA_INFO *pData)
{
    static int i = 0;
    package* pkg = NULL;

    if(g_stopCB2)
        return;
    switch(eDataType)
    {
    case VZ_LPRC_DATA_ENC_VIDEO:
        printf("Succeed to get a frame: %5d\tsize:%5d K:%d\n", i++, pData->uDataSize, pData->uIsKeyFrame);
        fwrite(pData->pBuffer, pData->uDataSize, 1, g_fp_H264);
        pkg = (package*)malloc(sizeof(package));
        pkg->buf = (uint8_t*)malloc(pData->uDataSize);
        memcpy(pkg->buf, pData->pBuffer, pData->uDataSize);
        pkg->len = pData->uDataSize;
        pkg->pre = 0;
        g_que_pkg.push(pkg);
        break;
    default:
        break;
    }
    if(g_frame_index >= STOPNUM)
        g_stopCB2 = true;
}

int main()
{
    if(VzLPRClient_Setup() == -1)
    {
        printf("function _Setup error !\n");
        return -1;
    }
    av_register_all();

    g_pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    g_pCodecCtx = avcodec_alloc_context3(g_pCodec);
    g_pCodecCtx->bit_rate = 4000000;
    g_pCodecCtx->width = 1280;
    g_pCodecCtx->height = 720;
    g_pCodecCtx->time_base.num = 1;
    g_pCodecCtx->time_base.den = 10;
    g_pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    avcodec_open2(g_pCodecCtx, g_pCodec, NULL);
    g_pFra->format = g_pCodecCtx->pix_fmt;
    g_pFra->width  = g_pCodecCtx->width;
    g_pFra->height = g_pCodecCtx->height;

    VzLPRClientHandle hClient = VzLPRClient_Open("192.168.3.87", 80, "admin", "admin");
    int framerate;
    VzLPRClient_GetVideoFrameRate(hClient, &framerate);
    VzLPRClient_SetVideoFrameCallBack(hClient, GetFrameCB, NULL);
    VzLPRClient_SetRealDataCallBack(hClient, GetRealDataCB, NULL);

    AVFormatContext* ifmt_ctx=avformat_alloc_context();//用於內存輸入格式上下文
    //輸入流是一個內存buf
    unsigned char* inbuffer=(unsigned char*)malloc(IO_BUFFER_SIZE);  
    AVIOContext *avio_in =avio_alloc_context(inbuffer, IO_BUFFER_SIZE,0,NULL,read_buf,NULL,NULL);    
    ifmt_ctx->pb=avio_in;   
    AVInputFormat* ifmt_v = av_find_input_format("h264"); 
    avformat_open_input(&ifmt_ctx, NULL, ifmt_v, NULL);
    avformat_find_stream_info(ifmt_ctx, NULL);//這裏已經開始讀入輸入流了,調用read_buf

    AVFormatContext* ofmt_ctx = NULL;//輸出
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, "test.flv");
    AVOutputFormat* ofmt = ofmt_ctx->oformat;
    AVStream* out_stream = avformat_new_stream(ofmt_ctx, NULL);

    AVStream* in_stream = ifmt_ctx->streams[0];
    avcodec_copy_context(out_stream->codec, in_stream->codec);

    out_stream->codec->codec_tag = 0;  
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)  
        out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;  
    av_dump_format(ofmt_ctx, 0, "test.flv", 1);  
    if (!(ofmt->flags & AVFMT_NOFILE)) 
    {  
        if (avio_open(&ofmt_ctx->pb, "test.flv", AVIO_FLAG_WRITE) < 0) 
        {  
            printf("Could not open output file '%s'", "test.flv");  
        }  
    }
    avformat_write_header(ofmt_ctx, NULL);

    AVPacket pkt; 
    AVStream *inin_stream;  
    while(av_read_frame(ifmt_ctx, &pkt) >= 0)
    {   
        //假設輸入流有多個
        inin_stream = ifmt_ctx->streams[pkt.stream_index];  
        if (pkt.stream_index == in_stream->index)
        {  
            if (pkt.pts == AV_NOPTS_VALUE)//H264沒有時間戳
            {  
                //由於裸流裏本來就沒有可靠的pts和timebase等數據,所以pts的計算依靠設置的framerate
                double calc_duration = (double)1/framerate;
                pkt.pts = (double)(g_frame_index*calc_duration) / (av_q2d(out_stream->time_base));
                pkt.dts = pkt.pts;
                pkt.duration = calc_duration / av_q2d(out_stream->time_base);
                g_frame_index++;  
            }  
        }  
        pkt.pos = -1;  
        pkt.stream_index = out_stream->index;  

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);  
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) 
        {  
            printf("Error muxing packet\n");  
            break;  
        }  
        av_free_packet(&pkt);  
    }  
    av_write_trailer(ofmt_ctx);  

    while(!g_stopCB1 || !g_stopCB2)
        Sleep(100);
    SAFEFREE(ofmt_ctx, avformat_free_context);
    av_close_input_file(ifmt_ctx);
    SAFEFREE(avio_in, av_free);
    SAFEFREE(inbuffer, free);
    SAFEFREE(g_pCodecCtx, av_free);
    SAFEFREE(g_pFra, av_free);
    SAFEFREE(g_fp_H264, fclose);
    SAFEFREE(g_fp_out, fclose);
    SAFEFREE(g_fp_yuv, fclose);
    VzLPRClient_Cleanup();
    //system("pause");
}

由於這個只是測試的例子,所以是一步貫穿的做法,以後把視頻流推送到服務器的話,要使用多線程。

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章