Drawing Motion Vectors (MVs) with ffmpeg

This article uses ffmpeg to extract the motion vectors (MVs) of every video frame and OpenCV to draw them onto the decoded pictures.

Motion Vectors (MV)

Anyone familiar with video coding knows the motion vector: in inter prediction it is the vector that records the positional offset between the current block and its reference block. Inter prediction comes in two forms, unidirectional (P frames) and bidirectional (B frames); unidirectional prediction needs a single MV, bidirectional prediction needs two. For example, a P-frame block may point a few pixels left and up into the previous picture, while a B-frame block carries one MV into a past picture and one into a future picture.

ffmpeg defines an MV as follows:

typedef struct AVMotionVector {
    /**
     * Where the current macroblock comes from; negative value when it comes
     * from the past, positive value when it comes from the future.
     * XXX: set exact relative ref frame reference instead of a +/- 1 "direction".
     */
    // Whether the reference block lies in a past frame (negative) or a future frame (positive)
    int32_t source;
    /**
     * Width and height of the block.
     */
    // Width and height of the block this MV belongs to
    uint8_t w, h;
    /**
     * Absolute source position. Can be outside the frame area.
     */
    int16_t src_x, src_y;
    /**
     * Absolute destination position. Can be outside the frame area.
     */
    int16_t dst_x, dst_y;
    /**
     * Extra flag information.
     * Currently unused.
     */
    uint64_t flags;
    /**
     * Motion vector
     * src_x = dst_x + motion_x / motion_scale
     * src_y = dst_y + motion_y / motion_scale
     */
    int32_t motion_x, motion_y;
    uint16_t motion_scale;
} AVMotionVector;
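
The last three fields encode the displacement at sub-pixel precision. A minimal sketch of recovering the shift in whole pixels, given a const AVMotionVector *mv as in the loop further below (motion_scale is typically 4 for quarter-pel codecs such as H.264, but always divide by the reported value rather than hard-coding one):

// Hedged sketch: convert the sub-pel motion of an AVMotionVector *mv into pixels.
double dx = (double)mv->motion_x / mv->motion_scale;
double dy = (double)mv->motion_y / mv->motion_scale;
// This matches the documented relation: src_x = dst_x + dx, src_y = dst_y + dy,
// and a negative mv->source means the reference block lies in a past frame.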

ffmpeg's example code ships a sample program for MV extraction (doc/examples/extract_mvs.c); the adaptation below extracts the MVs and uses OpenCV to draw them onto each frame.

extern "C"
{
#include <libavutil/motion_vector.h>
#include <libavformat/avformat.h>
}
#include <opencv.hpp>
using namespace cv;
​
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL;
static AVStream *video_stream = NULL;
static const char *src_filename = NULL;
​
static int video_stream_idx = -1;
static AVFrame *frame = NULL;
static int video_frame_count = 0;
​
FILE *fout;
VideoWriter out;
​
static int decode_packet(const AVPacket *pkt)
{
    int ret = avcodec_send_packet(video_dec_ctx, pkt);
    if (ret < 0) {
        printf("Error while sending a packet to the decoder: %s\n");
        return ret;
    }
​
    while (ret >= 0)  {
        ret = avcodec_receive_frame(video_dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        }
        else if (ret < 0) {
            printf("Error while receiving a frame from the decoder: %s\n");
            return ret;
        }
​
        if (ret >= 0) {
            int i;
            AVFrameSideData *sd;
​
            video_frame_count++;
            sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
            //獲取每幀數據
            cv::Mat yuvImg;
            yuvImg.create(frame->height * 3 / 2, frame->width, CV_8UC1);
            memcpy(yuvImg.data, frame->data[0], frame->linesize[0] * frame->height*sizeof(uint8_t));
            memcpy(yuvImg.data + frame->linesize[0] * frame->height*sizeof(uint8_t), frame->data[1], frame->linesize[1] * frame->height/2*sizeof(uint8_t));
            memcpy(yuvImg.data + (frame->linesize[0] * frame->height + frame->linesize[1] * frame->height / 2)*sizeof(uint8_t), frame->data[2], frame->linesize[2] * frame->height / 2 * sizeof(uint8_t));
            cv::Mat rgbImg;
            cv::cvtColor(yuvImg, rgbImg, CV_YUV2BGR_I420);
            if (sd) {
                const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
                for (i = 0; i < sd->size / sizeof(*mvs); i++) {
                    const AVMotionVector *mv = &mvs[i];
                    //繪製mv
                    line(rgbImg, Point(mv->src_x, mv->src_y), Point(mv->dst_x, mv->dst_y), Scalar(0, 0, 255));
                }
            }
            //將帶mv的幀寫入文件
            out << rgbImg;
            av_frame_unref(frame);
        }
    }
​
    return 0;
}
​
static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;
​
    ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
            av_get_media_type_string(type), src_filename);
        return ret;
    }
    else {
        int stream_idx = ret;
        st = fmt_ctx->streams[stream_idx];
​
        dec_ctx = avcodec_alloc_context3(dec);
        if (!dec_ctx) {
            fprintf(stderr, "Failed to allocate codec\n");
            return AVERROR(EINVAL);
        }
​
        ret = avcodec_parameters_to_context(dec_ctx, st->codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy codec parameters to codec context\n");
            return ret;
        }
​
        /* Init the video decoder */
        av_dict_set(&opts, "flags2", "+export_mvs", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                av_get_media_type_string(type));
            return ret;
        }
​
        video_stream_idx = stream_idx;
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = dec_ctx;
    }
​
    return 0;
}
​
int main(int argc, char **argv)
{
    fout = fopen("out.yuv","wb");
    //out.open("out.avi", CV_FOURCC('X', 'V', 'I', 'D'),25, Size(640, 272));
    out.open("out.mp4", CV_FOURCC('D', 'I', 'V', 'X'), 25, Size(640, 272));
    int ret = 0;
    AVPacket pkt = { 0 };
​
    if (argc != 2) {
        fprintf(stderr, "Usage: %s <video>\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
​
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }
​
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }
​
    open_codec_context(fmt_ctx, AVMEDIA_TYPE_VIDEO);
​
    av_dump_format(fmt_ctx, 0, src_filename, 0);
​
    if (!video_stream) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }
​
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
​
    printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
​
    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream_idx)
            ret = decode_packet(&pkt);
        av_packet_unref(&pkt);
        if (ret < 0)
            break;
    }
​
    /* flush cached frames */
    decode_packet(NULL);
​
end:
    avcodec_free_context(&video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    fclose(fout);
    system("pause");
    return ret < 0;
}
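
Run the program with the input file as its only argument; it writes out.mp4 with every exported MV drawn as a red line. Assuming pkg-config knows both libraries (module names vary by install; opencv4 and the libav* modules are typical), a build command along these lines should work, with draw_mvs.cpp as a placeholder name for the listing above:

g++ draw_mvs.cpp -o draw_mvs $(pkg-config --cflags --libs libavformat libavcodec libavutil opencv4)

As a quick cross-check that needs no code at all, ffmpeg can overlay the exported vectors itself with its codecview filter:

ffplay -flags2 +export_mvs input.mp4 -vf codecview=mv=pf+bf+bb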

Problems

The MVs exported by ffmpeg have a few shortcomings:

  • The macroblock's position within the picture is not given.

  • For bidirectional prediction, the two MVs of a block are not explicitly called out as a pair (the direction of each can still be told apart; see the sketch below).

  • The exact reference picture of each MV is not identified.
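
The second limitation can at least be softened: the sign of source distinguishes forward from backward prediction, so the two directions can be drawn in different colors. A minimal sketch, replacing the drawing call inside the loop above (names as defined there):

// Hedged sketch: negative source means the reference lies in a past frame
// (forward prediction), positive in a future frame (backward prediction).
// Exactly which reference picture it is still cannot be recovered.
Scalar color = (mv->source < 0) ? Scalar(0, 0, 255)   // forward: red
                                : Scalar(255, 0, 0);  // backward: blue
line(rgbImg, Point(mv->src_x, mv->src_y), Point(mv->dst_x, mv->dst_y), color);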

If you are interested, follow the WeChat public account Video Coding.

 
