A Minimal Player Based on FFmpeg and SDL 1.2

Approach

Writing a player on top of FFmpeg is actually quite simple. It is essentially a wrapper around the FFmpeg APIs; since the decoded audio and video also have to be presented by the host, we additionally rely on the platform's SDL library. The overall steps are:
1. Build the FFmpeg libraries used for audio/video decoding;
2. Build the SDL library used for audio/video presentation;
3. Write the main program that drives the whole audio/video pipeline.

Building the FFmpeg Libraries

This step was already described in 《與FFmpeg的初次邂逅》 (http://blog.csdn.net/ericbar/article/details/69943941). To keep things simple here, we build the FFmpeg libraries as static libraries rather than linking them dynamically.
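
For reference, a configure invocation along the following lines produces static libraries; the prefix is illustrative and should match the paths used in the Makefile below:

ffmpeg@ubuntu:~/work/ffmpeg-3.2.4$ ./configure --prefix=/home/ffmpeg/work/ffmpeg-3.2.4/out --enable-static --disable-shared
ffmpeg@ubuntu:~/work/ffmpeg-3.2.4$ make && make install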

Building the SDL Library

We build against SDL 1.2.15 rather than the newer 2.0 series. Download the 1.2.15 source from the official SDL site, http://www.libsdl.org/, and the build itself is entirely routine; just take note of the prefix where the generated SDL libraries and headers get installed.

ffmpeg@ubuntu:~/work/test$ tar xzvf SDL-1.2.15.tar.gz 
ffmpeg@ubuntu:~/work/test$ cd SDL-1.2.15/
ffmpeg@ubuntu:~/work/test/SDL-1.2.15$ ./configure  --prefix=/home/ffmpeg/work/SDL-1.2.15/out
ffmpeg@ubuntu:~/work/test/SDL-1.2.15$ make

During the build you may hit the following error:

./src/video/x11/SDL_x11sym.h:168:17: error: conflicting types for ‘_XData32’
 SDL_X11_SYM(int,_XData32,(Display *dpy,register long *data,unsigned len),(dpy,data,len),return)
                 ^
./src/video/x11/SDL_x11dyn.c:95:5: note: in definition of macro ‘SDL_X11_SYM’
  rc fn params { ret p##fn args ; }
     ^
In file included from ./src/video/x11/SDL_x11dyn.h:34:0,
                 from ./src/video/x11/SDL_x11dyn.c:26:
/usr/include/X11/Xlibint.h:568:12: note: previous declaration of ‘_XData32’ was here
 extern int _XData32(
            ^
build-deps:1129: recipe for target 'build/SDL_x11dyn.lo' failed
make: *** [build/SDL_x11dyn.lo] Error 1
ffmpeg@ubuntu:~/work/SDL-1.2.15$ 

It can be fixed with the following change (see http://blog.csdn.net/jhting/article/details/38523945):

-SDL_X11_SYM(int,_XData32,(Display *dpy,register long *data,unsigned len),(dpy,data,len),return)  
+SDL_X11_SYM(int,_XData32,(Display *dpy,register _Xconst long *data,unsigned len),(dpy,data,len),return)  
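
After applying the patch, rerun the build and install the result into the configured prefix, since the Makefile below looks for the SDL headers and libraries under the out directory:

ffmpeg@ubuntu:~/work/test/SDL-1.2.15$ make
ffmpeg@ubuntu:~/work/test/SDL-1.2.15$ make install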

The xplayer Player

Let's call this player xplayer for now. The code is managed with a Makefile, and the directory layout is as follows:
(figure: xplayer source directory layout)

Here is the player's Makefile:

# xplayer Makefile Sample

# List Compiler Tools
CC = gcc 
XX = g++ 
CFLAGS = -Wall -O -g

# Compile Target
TARGET = xplayer    

# Include files
INCDIR = /home/ffmpeg/work/ffmpeg-3.2.4/out/include
INCDIR += /home/ffmpeg/work/ffmpeg-3.2.4
INCDIR += /home/ffmpeg/work/SDL-1.2.15/out/include

INCLUDE = $(foreach dir, $(INCDIR), -I$(dir))

LIBPATH = /home/ffmpeg/work/ffmpeg-3.2.4/out/lib
LIBPATH += /home/ffmpeg/work/SDL-1.2.15/out/lib 

LIBSPATH = $(foreach dir, $(LIBPATH), -L$(dir))

# needs to be in linking order
LIB = avfilter avformat avcodec swresample swscale avutil pthread z SDL dl asound

LIBS := $(foreach n,$(LIB),-l$(n))

# Depend on
%.o: %.c 
    $(CC) $(INCLUDE) -c $< -o $@ $(CFLAGS)

%.o:%.cpp 
    $(XX) $(INCLUDE) -c $< -o $@ $(CFLAGS)

# Source Code
SOURCES = $(wildcard *.c *.cpp) 

# Objs File
OBJS = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(SOURCES)))

# BIN depend on
$(TARGET) : $(OBJS) 
    $(XX) -o $(TARGET) $(OBJS) $(LIBS) $(LIBSPATH) 
    chmod a+x $(TARGET) 

# clean
clean : 
    rm -rf $(OBJS)
    rm -rf $(TARGET)

Below is audio.c, the audio-related code:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


#include "xplayer.h"

static AVFilterContext *in_audio_filter;   // the first filter in the audio chain
static AVFilterContext *out_audio_filter;  // the last filter in the audio chain
static AVFilterGraph *agraph;              // audio filter graph
static struct AudioParams audio_filter_src;
static double audio_diff_cum; /* used for AV difference average computation */
static double audio_diff_avg_coef;
static double audio_diff_threshold;
static int audio_diff_avg_count;
static double audio_clock;
static int audio_buf_size;
static int audio_buf_index;


static int synchronize_audio(short *samples, int samples_size)
{
    int n;
    double ref_clock;
    double diff, avg_diff;
    int wanted_size, min_size, max_size;

    ref_clock = get_master_clock();
    diff = get_audio_clock() - ref_clock;

    if(fabs(diff) < AV_NOSYNC_THRESHOLD)
    {
        // accumulate the diffs
        audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum;
        if(audio_diff_avg_count < AUDIO_DIFF_AVG_NB)
        {
            audio_diff_avg_count++;
        }
        else
        {
            avg_diff = audio_diff_cum * (1.0 - audio_diff_avg_coef);

            if(fabs(avg_diff) >= audio_diff_threshold) 
            {
                n = (2 * global_context.acodec_ctx->channels);
                wanted_size = samples_size + ((int)(diff * global_context.acodec_ctx->sample_rate) * n);
                min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX)  / 100);
                max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX)  / 100);
                if(wanted_size < min_size) 
                {
                    wanted_size = min_size;
                } 
                else if (wanted_size > max_size) 
                {
                    wanted_size = max_size;
                }

                if(wanted_size < samples_size) 
                {
                    samples_size = wanted_size;
                } 
                else if(wanted_size > samples_size)
                {
                    uint8_t *samples_end, *q;
                    int nb;

                    nb = (wanted_size - samples_size);   // bytes we still need to add
                    samples_end = (uint8_t *)samples + samples_size - n;
                    q = samples_end + n;
                    while(nb > 0) 
                    {
                        memcpy(q, samples_end, n);
                        q += n;
                        nb -= n;
                    }
                    samples_size = wanted_size;
                }
            }
        }
    }
    else 
    {
        audio_diff_avg_count = 0;
        audio_diff_cum = 0;
    }

    return samples_size;

}
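
To make the correction arithmetic above concrete: with signed 16-bit samples there are n = 2 * channels bytes per sample frame, so a clock drift of diff seconds maps to diff * sample_rate * n bytes. A tiny standalone sketch of that math (the function name and the numbers are purely illustrative):

#include <stdio.h>

/* bytes of S16 PCM corresponding to `diff` seconds of drift;
 * mirrors the wanted_size adjustment in synchronize_audio() */
static int drift_bytes(double diff, int sample_rate, int channels)
{
    int n = 2 * channels;                  /* bytes per sample frame */
    return (int)(diff * sample_rate) * n;  /* samples of drift -> bytes */
}

int main(void)
{
    /* 20 ms of drift at 44100 Hz stereo: 882 samples * 4 bytes = 3528 bytes */
    printf("%d\n", drift_bytes(0.02, 44100, 2));
    return 0;
}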


static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src, AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    AVFilter        *abuffer;
    AVFilterContext *aformat_ctx;
    AVFilter        *aformat;
    AVFilterContext *abuffersink_ctx;
    AVFilter        *abuffersink;

    char options_str[1024];
    char ch_layout[64];

    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) 
    {
        av_log(NULL, AV_LOG_ERROR, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter;
    * it will be used for feeding the data into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the abuffer instance.\n");
        return AVERROR(ENOMEM);
    }

    /* Set the filter options through the AVOptions API. */
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, audio_filter_src.channel_layout);
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout,                            AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt",     av_get_sample_fmt_name(audio_filter_src.fmt), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base",      (AVRational){ 1, audio_filter_src.freq},  AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate",    audio_filter_src.freq,                     AV_OPT_SEARCH_CHILDREN);

    /* Now initialize the filter; we pass NULL options, since we have already
    * set all the options above. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the abuffer filter.\n");
        return err;
    }


    /* Create the aformat filter;
    * it ensures that the output is of the format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) 
    {
        av_log(NULL, AV_LOG_ERROR, "Could not find the aformat filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) 
    {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the aformat instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A third way of passing the options is in a string of the form
    * key1=value1:key2=value2.... */
    snprintf(options_str, sizeof(options_str),
        "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), audio_filter_src.freq,
        (uint64_t)audio_filter_src.channel_layout);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) 
    {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        return err;
    }

    /* Finally create the abuffersink filter;
    * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) 
    {
        av_log(NULL, AV_LOG_ERROR, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) 
    {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) 
    {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the abuffersink instance.\n");
        return err;
    }


    /* Connect the filters;
    * in this simple case the filters just form a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
    {
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    }

    if (err < 0) 
    {
        av_log(NULL, AV_LOG_ERROR, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        return err;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;

    return 0;
 }
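
The graph built above is just a linear chain, abuffer -> aformat -> abuffersink, whose only job is to convert whatever the decoder produces into packed signed 16-bit samples. If no real filtering is needed, libswresample could do the same conversion more directly. A minimal sketch, assuming `frame` is a decoded frame and `out` is a buffer large enough for the converted samples (error handling omitted):

SwrContext *swr = swr_alloc_set_opts(NULL,
        audio_filter_src.channel_layout, AV_SAMPLE_FMT_S16, audio_filter_src.freq,    /* output */
        audio_filter_src.channel_layout, audio_filter_src.fmt, audio_filter_src.freq, /* input  */
        0, NULL);
swr_init(swr);
swr_convert(swr, &out, frame->nb_samples,
            (const uint8_t **)frame->extended_data, frame->nb_samples);
swr_free(&swr);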




 static inline int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
 {
     if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
     {
         return channel_layout;
     }
     else
     {
         return 0;
     }
 }

 // decode one packet into at most one frame (multi-frame packets are not handled)
 // returns the size in bytes of the decoded frame, not the consumed packet size
static int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) 
{
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    int len1, data_size;
    int got_frame;
    AVFrame * frame = NULL;
    static int reconfigure = 1;
    int ret = -1;

    for(;;) 
    {
        while(audio_pkt_size > 0) 
        {         
            if(NULL == frame)
            {
                frame = av_frame_alloc();
            }

            data_size = buf_size;
            got_frame = 0;

            // len1 is the number of bytes consumed from the packet;
            // < 0 means failure or error, so break and fetch a new packet
            len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame, &pkt);
            if(len1 < 0)
            {
                audio_pkt_size = 0;
                av_log(NULL, AV_LOG_ERROR, "avcodec_decode_audio4 failure. \n");
                break;
            }

            // advance past the consumed bytes; for most audio codecs one
            // packet yields one frame, so this normally drains the packet
            audio_pkt_data += len1;
            audio_pkt_size -= len1;

            if(got_frame)
            {
                if (reconfigure) 
                {
                    reconfigure = 0;
                    int64_t dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

                    // used by init_filter_graph()
                    audio_filter_src.fmt            = frame->format;
                    audio_filter_src.channels       = av_frame_get_channels(frame);
                    audio_filter_src.channel_layout = dec_channel_layout;
                    audio_filter_src.freq           = frame->sample_rate;

                    init_filter_graph(&agraph, &in_audio_filter, &out_audio_filter);
                }

                if ((ret = av_buffersrc_add_frame(in_audio_filter, frame)) < 0)
                {
                    av_log(NULL, AV_LOG_ERROR, "av_buffersrc_add_frame :  failure. \n");        
                    return ret;
                }

                if ((ret = av_buffersink_get_frame(out_audio_filter, frame)) < 0) 
                {
                    av_log(NULL, AV_LOG_ERROR, "av_buffersink_get_frame :  failure. \n");
                    continue;
                }

                data_size = av_samples_get_buffer_size(NULL, frame->channels, frame->nb_samples, frame->format, 1);      

                // decoded data to audio buf
                memcpy(audio_buf, frame->data[0], data_size);

                int  n = 2 * global_context.acodec_ctx->channels;
                audio_clock += (double)data_size / (double)(n * global_context.acodec_ctx->sample_rate); // add bytes offset
                av_free_packet(&pkt);
                av_frame_free(&frame);

                return data_size;
            }
        }

        av_free_packet(&pkt);
        av_frame_free(&frame);

        // get a new packet
        if(packet_queue_get(&global_context.audio_queue, &pkt, 1) < 0) 
        {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;

        // save current pts clock
        if(pkt.pts != AV_NOPTS_VALUE) 
        {
            audio_clock = pkt.pts * av_q2d(global_context.astream->time_base);
        }
     }

     return ret;
 }
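
One caveat: avcodec_decode_audio4() is the FFmpeg 3.x entry point and is deprecated in later releases in favor of the send/receive API. Against a newer FFmpeg, the inner decode step would look roughly like this sketch (same aCodecCtx, pkt and frame as above):

// sketch only: new-style decoding (FFmpeg >= 3.1)
ret = avcodec_send_packet(aCodecCtx, &pkt);
while (ret >= 0)
{
    ret = avcodec_receive_frame(aCodecCtx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;          // decoder needs more input, or the stream ended
    if (ret < 0)
        return ret;     // a real decoding error
    // ... feed `frame` into the filter graph exactly as above ...
}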


double get_audio_clock() 
{
    double pts;
    int hw_buf_size, bytes_per_sec, n;

    pts = audio_clock;
    hw_buf_size = audio_buf_size - audio_buf_index;
    n = global_context.acodec_ctx->channels * 2;
    bytes_per_sec = global_context.acodec_ctx->sample_rate * n;

    // back the pts off by the time represented by data still in the buffer
    if(bytes_per_sec) {
        pts -= (double)hw_buf_size / bytes_per_sec;
    }

    return pts;
}


void audio_callback(void *userdata, Uint8 *stream, int len)
{
    AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    int len1, audio_size;
    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE*3)/2 + FF_INPUT_BUFFER_PADDING_SIZE];

    while(len > 0) 
    {        
        // "audio_buf_index >= audio_buf_size" means all decoded data have aleady read over
        // we need read new packet for decode
        if(audio_buf_index >= audio_buf_size) 
        {
            // decode a new packet, result in a new frame
            audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
            if(audio_size < 0) 
            {
                // decoding returned no data: fill the buffer with silence
                audio_buf_size = (AVCODEC_MAX_AUDIO_FRAME_SIZE*3)/2 + FF_INPUT_BUFFER_PADDING_SIZE;
                audio_buf_index = 0;
                memset(audio_buf, 0, audio_buf_size);
            }
            else 
            {
                // decode ok, sync audio, just give synced size 
                audio_size = synchronize_audio((int16_t *)audio_buf, audio_size);
                audio_buf_size = audio_size;
                audio_buf_index = 0;
            }
        }

        // copy buffer data(decoded) to audio device(stream)
        len1 = audio_buf_size - audio_buf_index;    // remained decoded data size
        if(len1 > len)
        {
            len1 = len;
        }

        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}

Below is video.c, the video-related source:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


#include "xplayer.h"


static double video_clock; 
static double video_current_pts;


static double synchronize_video(AVFrame *src_frame, double pts)
{
    double frame_delay;

    if(pts != 0)
    {
        // we were given a valid pts: sync the video clock to it
        video_clock = pts;
    }
    else
    {
        // no pts: assume the frame lands at the current video clock
        pts = video_clock;
    }

    // advance the video clock by one frame interval; each repeated
    // field (repeat_pict) extends the frame by half an interval
    frame_delay = av_q2d(global_context.vcodec_ctx->time_base);
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);

    video_clock += frame_delay;

    return pts;
}


static int img_convert(AVPicture *dst, int dst_pix_fmt,
                const AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    int w;
    int h;
    struct SwsContext *pSwsCtx;

    w = src_width;
    h = src_height;

    pSwsCtx = sws_getContext(w, h, src_pix_fmt, w, h, dst_pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
    if(NULL == pSwsCtx)
    {
        return -1;
    }

    sws_scale(pSwsCtx, (const uint8_t* const*)src->data, src->linesize, 0, h, dst->data, dst->linesize);

    // release the scaler context here (see the caching note below)
    sws_freeContext(pSwsCtx);
    return 0;
}
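
Allocating and freeing a SwsContext on every frame is wasteful. libswscale provides sws_getCachedContext(), which reuses the previous context as long as the parameters have not changed; a sketch of that pattern (the static variable is illustrative):

static struct SwsContext *sws_ctx = NULL;   // survives across calls

sws_ctx = sws_getCachedContext(sws_ctx, w, h, src_pix_fmt,
                               w, h, dst_pix_fmt, SWS_BICUBIC,
                               NULL, NULL, NULL);
sws_scale(sws_ctx, (const uint8_t* const*)src->data, src->linesize,
          0, h, dst->data, dst->linesize);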

static int queue_picture(AVFrame *pFrame, double pts) 
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    SDL_Event event; 

    SDL_LockMutex(global_context.pictq_mutex);

    while(global_context.pictq_size >= VIDEO_PICTURE_QUEUE_SIZE)
    {
        SDL_CondWait(global_context.pictq_cond, global_context.pictq_mutex);
    }
    SDL_UnlockMutex(global_context.pictq_mutex);

    // windex is set to 0 initially
    vp = &global_context.pictq[global_context.pictq_windex];

    if(!vp->bmp ||
    vp->width != global_context.vcodec_ctx->width ||
    vp->height != global_context.vcodec_ctx->height) 
    {
        vp->allocated = 0;
        event.type = FF_ALLOC_EVENT;
        SDL_PushEvent(&event);

        SDL_LockMutex(global_context.pictq_mutex);
        while(!vp->allocated)
        {
            SDL_CondWait(global_context.pictq_cond, global_context.pictq_mutex);
        }
        SDL_UnlockMutex(global_context.pictq_mutex);
    }

    if(vp->bmp) 
    {
        vp->pts = pts;
        SDL_LockYUVOverlay(vp->bmp);

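        // note: SDL's YV12 overlay stores its planes in Y, V, U order, while
        // FFmpeg's AV_PIX_FMT_YUV420P is Y, U, V, hence the swapped 1/2 indices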
        dst_pix_fmt = AV_PIX_FMT_YUV420P;
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];
        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

        // Convert the image into YUV format that SDL uses
        img_convert(&pict, dst_pix_fmt, (AVPicture *)pFrame, global_context.vcodec_ctx->pix_fmt,
                            global_context.vcodec_ctx->width, global_context.vcodec_ctx->height);

        SDL_UnlockYUVOverlay(vp->bmp);

        if(++global_context.pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) 
        {
            global_context.pictq_windex = 0;
        }

        SDL_LockMutex(global_context.pictq_mutex);
        global_context.pictq_size++;
        SDL_UnlockMutex(global_context.pictq_mutex);

    }

    return 0;
}


double get_video_clock() 
{
    double delta = (av_gettime() - global_context.video_current_pts_time) / 1000000.0;
    return video_current_pts + delta;
}


int video_thread(void *arg) 
{
    AVPacket pkt1;
    AVPacket *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;

    pFrame = av_frame_alloc();

    for(;;) 
    {
        if(packet_queue_get(&global_context.video_queue, packet, 1) < 0)
        {
            // means we quit getting packets
            av_log(NULL, AV_LOG_ERROR, "packet_queue_get failure . \n");
            break;
        }

        avcodec_decode_video2(global_context.vcodec_ctx, pFrame, &frameFinished, packet);

        // Did we get a video frame?
        if(frameFinished)
        {        
            if(pFrame->pkt_pts != AV_NOPTS_VALUE)
            {
                pts = pFrame->pkt_pts * av_q2d(global_context.vstream->time_base);
            }
            else
            {
                pts = 0;
            }
            pts = synchronize_video(pFrame, pts);  
            if(queue_picture(pFrame,  pts) < 0) 
            {
                break;
            }
        }
        // av_packet_unref() also reinitializes the packet for reuse
        av_packet_unref(packet);
    }

    av_frame_free(&pFrame);

    return 0;
}


void alloc_picture(void *userdata) 
{
    VideoPicture *vp;

    vp = &global_context.pictq[global_context.pictq_windex];
    if(vp->bmp) 
    {
        // we already have an overlay: free it and allocate a new one at the new size
        SDL_FreeYUVOverlay(vp->bmp);
    }
    // Allocate a place to put our YUV image on that screen
    vp->bmp = SDL_CreateYUVOverlay(global_context.vcodec_ctx->width, global_context.vcodec_ctx->height, SDL_YV12_OVERLAY, global_context.screen);
    vp->width = global_context.vcodec_ctx->width;
    vp->height = global_context.vcodec_ctx->height;

    SDL_LockMutex(global_context.pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(global_context.pictq_cond);
    SDL_UnlockMutex(global_context.pictq_mutex);
}


void video_display() 
{
    SDL_Rect rect;
    VideoPicture *vp;
    float aspect_ratio;
    int w, h, x, y;

    vp = &global_context.pictq[global_context.pictq_rindex];

    if(vp->bmp) 
    {
        if(global_context.vcodec_ctx->sample_aspect_ratio.num == 0) 
        {
            aspect_ratio = 0;
        }
        else
        {
            aspect_ratio = av_q2d(global_context.vcodec_ctx->sample_aspect_ratio) *
            global_context.vcodec_ctx->width / global_context.vcodec_ctx->height;
        }

        if(aspect_ratio <= 0.0)
        {
            aspect_ratio = (float)global_context.vcodec_ctx->width /(float)global_context.vcodec_ctx->height;
        }

        h = global_context.screen->h;
        w = ((int)rint(h * aspect_ratio)) & -3;

        if(w > global_context.screen->w)
        {
            w = global_context.screen->w;
            h = ((int)rint(w / aspect_ratio)) & -3;
        }

        x = (global_context.screen->w - w) / 2;
        y = (global_context.screen->h - h) / 2;

        rect.x = x;
        rect.y = y;
        rect.w = w;
        rect.h = h;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}



void video_refresh_timer()
{
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(global_context.pictq_size == 0)
    {
        schedule_refresh(1);
    } 
    else 
    {
        vp = &global_context.pictq[global_context.pictq_rindex];

        video_current_pts = vp->pts;
        global_context.video_current_pts_time = av_gettime();

        delay = vp->pts - global_context.frame_last_pts;

        if(delay <= 0 || delay >= 1.0)
        {
            // bogus delay value: fall back to the last known good delay
            delay = global_context.frame_last_delay;
        }

        global_context.frame_last_delay = delay;
        global_context.frame_last_pts = vp->pts;

        ref_clock = get_master_clock();
        diff = vp->pts - ref_clock;

        sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
        if(fabs(diff) < AV_NOSYNC_THRESHOLD)
        {
            if(diff <= -sync_threshold)
            {
                // video is behind the master clock: show this frame at once
                av_log(NULL, AV_LOG_ERROR, "video_refresh_timer : behind, no delay. \n");
                delay = 0;
            }
            else if(diff >= sync_threshold)
            {
                // video is ahead of the master clock: wait twice as long
                av_log(NULL, AV_LOG_ERROR, "video_refresh_timer : ahead, double delay. \n");
                delay = 2 * delay;
            }
        }
        else
        {
            av_log(NULL, AV_LOG_ERROR, "A-V diff beyond no-sync threshold, diff = %f, vp->pts = %f , ref_clock = %f\n", diff, vp->pts, ref_clock);
        }

        global_context.frame_timer += delay;

        actual_delay = global_context.frame_timer - (av_gettime() / 1000000.0);
        if(actual_delay < 0.010)
        {
            // clamp: don't try to refresh faster than 100 fps
            actual_delay = 0.010;
        }
        schedule_refresh((int)(actual_delay * 1000 + 0.5)); // +0.5 to round to the nearest ms

        video_display();

        if(++global_context.pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
        {
            global_context.pictq_rindex = 0;
        }

        SDL_LockMutex(global_context.pictq_mutex);
        global_context.pictq_size--;
        SDL_CondSignal(global_context.pictq_cond);
        SDL_UnlockMutex(global_context.pictq_mutex);
    }
}

Below is util.c, the shared utility code:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


#include "xplayer.h"

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;

    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);

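    // returning 0 cancels the SDL timer, making each refresh one-shot;
    // video_refresh_timer() schedules the next tick itself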
    return 0;
}


void schedule_refresh(int delay) 
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb , NULL);
}

void packet_queue_init(PacketQueue *q) 
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}


int packet_queue_put(PacketQueue *q, AVPacket *pkt) 
{
    AVPacketList *pkt1;

    if(av_dup_packet(pkt) < 0) 
    {
        return -1;
    }

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
    {
        return -1;
    }

    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
    {
        q->first_pkt = pkt1;
    }
    else
    {
        q->last_pkt->next = pkt1;
    }

    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);

    return 0;
}


int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) 
    {
        if(global_context.quit)
        {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;

        if (pkt1) 
        {
            q->first_pkt = pkt1->next;

            if (!q->first_pkt)
            {
                q->last_pkt = NULL;
            }

            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        }
        else if (!block) 
        {
            ret = 0;
            break;
        }
        else 
        {
            SDL_CondWait(q->cond, q->mutex);
        }
    }

    SDL_UnlockMutex(q->mutex);

    return ret;
}

int packet_queue_size(PacketQueue *q) 
{
    return q->size;
}

Below are the core player control code, xplayer.c, and its header xplayer.h:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <signal.h>
#include "xplayer.h"

#define TEST_FILE_AVI "/home/ffmpeg/work/ljr.avi"
#define TEST_FILE_4K "/home/ffmpeg/work/4k.mp4"
#define TEST_FILE_TS "/home/ffmpeg/work/clear.ts"
#define TEST_FILE_H265 "/home/ffmpeg/work/surfing.265"
#define TEST_FILE_1080P "/home/ffmpeg/work/1080p.avi"
#define TEST_FILE_FLV "/home/ffmpeg/work/flvtest.flv"
#define TEST_FILE_AAC "/home/ffmpeg/work/aaclc.mp4"
#define TEST_FILE_JPG "/home/ffmpeg/work/11.jpg"
#define TEST_FILE_MP3 "/home/ffmpeg/work/lkdd.mp3"
#define TEST_FILE_MLH_MP3 "/home/ffmpeg/work/mlh.mp3"
#define TEST_FILE_WAV "/home/ffmpeg/work/xpstart.wav"
#define TEST_FILE_MPG "/home/ffmpeg/work/rec.mpg"
#define TEST_FILE_RMVB "/home/ffmpeg/work/tj.rmvb"
#define TEST_FILE_RTSP "rtsp://10.18.69.232:8554/clear.ts"
#define TEST_FILE_VIDEO_DST "/home/ffmpeg/work/study/test/readframe/video.es"
#define TEST_FILE_HD_CTS "/home/ffmpeg/work/bbb_short.ffmpeg.1280x720.mp4.libx264_5000kbps_30fps.libfaac_stereo_192kbps_48000Hz.mp4"
#define TEST_FILE_HD_CTS2 "/home/ffmpeg/work/bbb_short.ffmpeg.480x360.mp4.libx264_500kbps_25fps.libfaac_stereo_128kbps_44100Hz.mp4"


#define TEST_FILE_NAME TEST_FILE_AAC

#define SDL_AUDIO_BUFFER_SIZE 4096

static int av_sync_type = AV_SYNC_AUDIO_MASTER;
GlobalContext global_context;


static void sigterm_handler(int sig)
{
    SDL_Event event;

    av_log(NULL, AV_LOG_ERROR, "sigterm_handler : sig is %d \n", sig);    

    event.type = FF_QUIT_EVENT;
    SDL_PushEvent(&event);

    exit(123);
}


double get_master_clock() {
    if(av_sync_type == AV_SYNC_VIDEO_MASTER) {
        return get_video_clock();
    } else if(av_sync_type == AV_SYNC_AUDIO_MASTER) {
        return get_audio_clock();
    } else {
        return get_audio_clock();
    }
}


int event_thread(void *arg)
{
    SDL_Event event;

    for(;;) {

        SDL_WaitEvent(&event);

        switch(event.type) { 
            case FF_ALLOC_EVENT:
                alloc_picture(event.user.data1);
                break;    
            case FF_QUIT_EVENT:
                global_context.quit = 1;
                break;
            case FF_REFRESH_EVENT:
                video_refresh_timer();
                break;
        }
    }
}





int main(int argc, char **argv)
{
    int i;
    int err = 0;
    AVFormatContext *fmt_ctx = NULL;
    AVPacket pkt;
    int audio_stream_index = -1;
    int video_stream_index = -1;
    SDL_AudioSpec desired;
    SDL_AudioSpec spec;

    global_context.quit = 0;

    // register INT/TERM signal
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */

    // set log level
    av_log_set_level(AV_LOG_WARNING);

    /* register all codecs, demux and protocols */
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

    fmt_ctx = avformat_alloc_context();
    err = avformat_open_input(&fmt_ctx, TEST_FILE_NAME, NULL, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "avformat_open_input : err is %d \n", err);
        err = -1;
        goto failure;
    }

    if ((err = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "avformat_find_stream_info : err is %d \n", err);
        err = -1;
        goto failure;
    }

    // search video stream in all streams.
    for (i = 0; i < fmt_ctx->nb_streams; i++)
    {
        // assume a single video stream: take the first one found and stop
        if (fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video_stream_index = i;
            break;
        }
    }

    // search audio stream in all streams.
    for (i = 0; i < fmt_ctx->nb_streams; i++)
    {
        // use the first audio stream
        if (fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audio_stream_index = i;
            break;
        }
    }

    // if no video and audio, exit
    if((-1 == video_stream_index) && (-1 == audio_stream_index)){
        goto failure;
    }

    // open video
    if(-1 != video_stream_index)
    {
        global_context.vcodec_ctx = fmt_ctx->streams[video_stream_index]->codec;
        global_context.vstream = fmt_ctx->streams[video_stream_index];
        global_context.vcodec = avcodec_find_decoder(global_context.vcodec_ctx->codec_id);
        if(NULL == global_context.vcodec)
        {
            av_log(NULL, AV_LOG_ERROR, "avcodec_find_decoder failure. \n");
            goto failure;
        }

        if(avcodec_open2(global_context.vcodec_ctx, global_context.vcodec, NULL) < 0 )
        {
            av_log(NULL, AV_LOG_ERROR, "avcodec_open2 failure. \n");
            goto failure;
        }
    }

    // open audio
    if(-1 != audio_stream_index)
    {
        global_context.acodec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
        global_context.astream = fmt_ctx->streams[audio_stream_index];
        global_context.acodec = avcodec_find_decoder(global_context.acodec_ctx->codec_id);
        if(NULL == global_context.acodec)
        {
            av_log(NULL, AV_LOG_ERROR, "avcodec_find_decoder failure. \n");
            err = -1;
            goto failure;
        }

        if(avcodec_open2(global_context.acodec_ctx, global_context.acodec, NULL) < 0 )
        {
            av_log(NULL, AV_LOG_ERROR, "avcodec_open2 failure. \n");
            err = -1;
            goto failure;
        }
    }

    // SDL init
    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize SDL - %s\n", SDL_GetError());
        goto failure;
    }

    // open SDL video surface
    if(-1 != video_stream_index)
    {
        // get vout screen
        global_context.screen = SDL_SetVideoMode(global_context.vcodec_ctx->width, global_context.vcodec_ctx->height, 0, 0); // SDL_NOFRAME
        if(!global_context.screen) {
            av_log(NULL, AV_LOG_ERROR, "SDL: could not set video mode - exiting\n");
            goto failure;
        }

        global_context.pictq_mutex = SDL_CreateMutex();
        global_context.pictq_cond = SDL_CreateCond();

    }

    // open the SDL audio device; audio decoding happens in audio_callback
    if(-1 != audio_stream_index)
    {
        desired.freq = global_context.acodec_ctx->sample_rate;
        desired.format = AUDIO_S16SYS;
        desired.channels = global_context.acodec_ctx->channels;
        desired.silence = 0;
        desired.samples = SDL_AUDIO_BUFFER_SIZE;
        desired.callback = audio_callback;
        desired.userdata = global_context.acodec_ctx;
        if(SDL_OpenAudio(&desired, &spec) < 0) {
            av_log(NULL, AV_LOG_ERROR, "SDL_OpenAudio: %s\n", SDL_GetError());
            goto failure;
        }

        // verify the format we actually got; this player only handles AUDIO_S16SYS
        if(spec.format != AUDIO_S16SYS)
        {
            av_log(NULL, AV_LOG_ERROR, "spec.format != AUDIO_S16SYS . \n");
            goto failure;
        }
    }

    // init frame time
    global_context.frame_timer = (double)av_gettime() / 1000000.0;
    global_context.frame_last_delay = 40e-3;

    // init video current pts
    global_context.video_current_pts_time = av_gettime();

    // create the event-handling thread
    SDL_CreateThread(event_thread, NULL);

    // init audio and video packet queue
    packet_queue_init(&global_context.video_queue);
    packet_queue_init(&global_context.audio_queue);

    // start audio device
    SDL_PauseAudio(0);

    if(-1 != video_stream_index)
    {
        // create the video decode thread
        SDL_CreateThread(video_thread, NULL);
        // kick off the video refresh timer loop
        video_refresh_timer();
    }

    // demux loop: read packets and hand them to the right queue
    while(av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream_index) {
            packet_queue_put(&global_context.video_queue, &pkt);
        } else  if(pkt.stream_index == audio_stream_index) {
            packet_queue_put(&global_context.audio_queue, &pkt);
        } else {
            av_free_packet(&pkt);
        }
    }

    // wait exit
    while(!global_context.quit)
    {
        SDL_Delay(100);        
    }

failure:

    if (fmt_ctx) {
        // avformat_close_input() also frees the context and sets fmt_ctx to NULL
        avformat_close_input(&fmt_ctx);
    }

    avformat_network_deinit();

    return 0;
}

The header file is as follows:

/*
 * Copyright (c) 2017 ericbaba
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


#ifndef __XPLAYER_H__
#define __XPLAYER_H__

#include "config.h"

#include "libavutil/log.h"
#include "libavutil/time.h"
#include "libavutil/samplefmt.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavcodec/internal.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#include "SDL/SDL.h"

#if CONFIG_AVDEVICE
#include "libavdevice/avdevice.h"
#endif
#if CONFIG_AVFILTER
#include "libavfilter/avfilter.h"
#endif


#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)

#define VIDEO_PICTURE_QUEUE_SIZE 3

#define AV_SYNC_THRESHOLD 0.1
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20
/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio


enum {
    AV_SYNC_AUDIO_MASTER,
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_MASTER,
};


typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    int serial;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;


typedef struct VideoPicture {
    SDL_Overlay *bmp;
    int width, height;
    int allocated;
    double pts;
    void *opaque;
} VideoPicture;


typedef struct AudioParams {
    int freq;
    int channels;
    int64_t channel_layout;
    enum AVSampleFormat fmt;
    int frame_size;
    int bytes_per_sec;
} AudioParams;


typedef struct GlobalContexts {
    AVCodecContext *acodec_ctx;
    AVCodecContext *vcodec_ctx;
    AVStream *vstream;
    AVStream *astream;
    AVCodec *vcodec;
    AVCodec *acodec;
    SDL_Surface *screen;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;    

    PacketQueue audio_queue;
    PacketQueue video_queue;

    int pictq_size;
    int pictq_windex;
    int pictq_rindex;
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];

    int audio_buf_size;
    int audio_buf_index;

    int64_t video_current_pts_time;

    double frame_last_delay;
    double frame_last_pts;
    double frame_timer;

    int quit;

}GlobalContext;







double get_master_clock(void);
double get_audio_clock(void);
double get_video_clock(void);
void schedule_refresh(int delay);
void packet_queue_init(PacketQueue *q);
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block);
int packet_queue_put(PacketQueue *q, AVPacket *pkt);
int packet_queue_size(PacketQueue *q);
void audio_callback(void *userdata, Uint8 *stream, int len);
int video_thread(void *arg);
void video_refresh_timer(void);
void alloc_picture(void *userdata);
void video_display(void);

extern GlobalContext global_context;


#endif /* __XPLAYER_H__ */

The TEST_FILE_NAME macro in xplayer.c selects the absolute path of the file to play.
Next, run make in the xplayer directory to build; the Makefile specifies the paths to the FFmpeg and SDL libraries and headers.
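
For example, to play the 1080p AVI defined above instead of the AAC test file, repoint the macro and rebuild (both macros are already defined in xplayer.c):

#define TEST_FILE_NAME TEST_FILE_1080P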

Two things may need attention before running the program:

1. First point the dynamic loader at the SDL shared library:
export LD_LIBRARY_PATH=/home/ffmpeg/work/SDL-1.2.15/out/lib:$LD_LIBRARY_PATH

2. On Ubuntu, the ALSA development package must be installed, otherwise the program may fail at runtime:
sudo apt-get install libasound-dev

Now run the program, and the decoded video appears in an Ubuntu window, with the audio playing along:

ffmpeg@ubuntu:~/work/xplayer$ ./xplayer

The playback window looks like this:

(figure: xplayer playing video in an Ubuntu window)

Pretty simple, right? Give it a try.
