FFmpeg A/V sync: reading from a camera, encoding, muxing and saving

//-------------------------------------------------------------------------------------------------
Reference link 1: https://blog.csdn.net/leixiaohua1020/article/details/39702113
Reference link 2: https://blog.csdn.net/li_wen01/article/details/67631687

//-------------------------------------------------------------------------------------------------
Articles in this series on synchronized audio/video recording
//-------------------------------------------------------------------------------------------------
1. ffmpeg: camera capture and save
2. ffmpeg A/V sync: camera capture, encode and mux
3. ffmpeg: generate an audio sine wave, encode and mux
4. ffmpeg: real-time audio capture and save
5. ffmpeg: real-time audio capture, encode and mux
6. ffmpeg A/V sync: real-time audio/video capture, encode and mux
7. ffmpeg A/V sync: real-time audio/video capture, encode and stream
8. ffmpeg A/V sync: real-time audio/video capture, encode and stream (optimized version)
//---------------------------------------------------------------

System environment:
OS: Lubuntu 16.04
FFmpeg version: ffmpeg version N-93527-g1125277
Camera: 1.3M HD WebCam
VM: Oracle VM VirtualBox 5.2.22

ffmpeg camera-capture commands:

List the available devices: ffmpeg -devices
Record the camera: ffmpeg -f video4linux2 -s 640x480 -i /dev/video0 -f flv test.flv
Here -f flv names the muxer explicitly; the supported muxers are listed in libavformat/muxer_list.c. Once a muxer is specified this way, it is used regardless of the file extension.
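
As a quick check of that behavior, here is a minimal sketch (assuming an FFmpeg 4.x build; the out.mp4 filename is only illustrative): it requests an output context with the muxer name given explicitly and prints which muxer libavformat actually selected, ignoring the extension.

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *oc = NULL;
    /* an explicit format name overrides extension-based guessing */
    avformat_alloc_output_context2(&oc, NULL, "flv", "out.mp4");
    if (!oc)
        return 1;
    printf("selected muxer: %s (%s)\n", oc->oformat->name, oc->oformat->long_name);
    avformat_free_context(oc);
    return 0;
}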

Record the camera with the H.264 encoder at a frame rate of 25:
ffmpeg -f video4linux2 -r 25 -s 640x480 -i /dev/video0 -f flv -vcodec libx264 test.flv

Here -vcodec selects the encoder. The encoder list is in libavcodec/codec_list.c; that structure gives the corresponding encoder names.
The codec descriptors live in libavcodec/codec_desc.c; the video-stream codec information printed by ffprobe test.flv comes from there. A descriptor and an encoder are tied together by the codec ID (e.g. AV_CODEC_ID_H264). Note that the muxer and the encoder must be compatible with each other.
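
A small sketch of that encoder/descriptor relationship (assuming FFmpeg 4.x built with libx264): look up the encoder by the name you would pass to -vcodec, then fetch its descriptor through the shared codec ID.

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVCodec *enc = avcodec_find_encoder_by_name("libx264");
    if (!enc) {
        fprintf(stderr, "libx264 encoder not available in this build\n");
        return 1;
    }
    /* descriptor and encoder are linked by enc->id (here AV_CODEC_ID_H264) */
    const AVCodecDescriptor *desc = avcodec_descriptor_get(enc->id);
    printf("encoder=%s id=%d descriptor=%s (%s)\n",
           enc->name, enc->id, desc->name, desc->long_name);
    return 0;
}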

Name | Developer          | Streaming | Supported video codecs             | Supported audio codecs                  | Typical use
AVI  | Microsoft Inc.     | no        | almost all                         | almost all                              | BT movie downloads
MP4  | MPEG               | yes       | MPEG-2, MPEG-4, H.264, H.263, etc. | AAC, MPEG-1 Layers I/II/III, AC-3, etc. | online video sites
TS   | MPEG               | yes       | MPEG-1, MPEG-2, MPEG-4, H.264      | MPEG-1 Layers I/II/III, AAC             | IPTV, digital TV
FLV  | Adobe Inc.         | yes       | Sorenson, VP6, H.264               | MP3, ADPCM, Linear PCM, AAC, etc.       | online video sites
MKV  | CoreCodec Inc.     | yes       | almost all                         | almost all                              | online video sites
RMVB | Real Networks Inc. | yes       | RealVideo 8, 9, 10                 | AAC, Cook Codec, RealAudio Lossless     | BT movie downloads

The table above is taken from: https://blog.csdn.net/leixiaohua1020/article/details/18893769

This article builds on "ffmpeg: camera capture and save", which captures a camera frame and converts it to YUV420 before saving it, and combines that with the doc/examples/muxing.c example from the FFmpeg source tree; only the video stream is muxed here.

1. Introduction

FFmpeg provides a library for interacting with multimedia devices: libavdevice. With it you can read data from the multimedia devices of a computer (or of some other device), or output data to a given multimedia device.
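
A minimal sketch of that flow under this article's setup (Linux with a v4l2 camera at /dev/video0): register the device (de)muxers, look the input format up by name, then open it like any other input.

#include <stdio.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

int main(void)
{
    avdevice_register_all();                 /* make the device (de)muxers visible */
    AVInputFormat *ifmt = av_find_input_format("video4linux2");
    AVFormatContext *ic = NULL;

    if (!ifmt) {
        fprintf(stderr, "video4linux2 input device not available\n");
        return 1;
    }
    if (avformat_open_input(&ic, "/dev/video0", ifmt, NULL) != 0) {
        fprintf(stderr, "could not open /dev/video0\n");
        return 1;
    }
    avformat_find_stream_info(ic, NULL);
    av_dump_format(ic, 0, "/dev/video0", 0);  /* print the capture format */
    avformat_close_input(&ic);
    return 0;
}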

2. Source code

The simplest libavdevice-based example: read YUV frames from the camera one by one, encode them with H.264, and mux and save the result as a test.mp4 file:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>                 /* usleep() */

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>   /* avdevice_register_all() */
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

// everything needed to read and decode frames from one camera input
typedef struct IntputDev {
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFormatContext *v_ifmtCtx;
    int videoindex;
    struct SwsContext *img_convert_ctx;
    AVPacket *in_packet;
    AVFrame *pFrame, *pFrameYUV;
} IntputDev;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 640;
        c->height   = 480;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    printf("ost->frame alloc success fmt=%d w=%d h=%d\n", c->pix_fmt, c->width, c->height);

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame1(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
{
    int ret;
    AVCodecContext *c;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    if (frame == NULL)
        return 1;

    c = ost->enc;

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    printf("--------------video- pkt.pts=%s\n", av_ts2str(pkt.pts));
    printf("----st.num=%d st.den=%d codec.num=%d codec.den=%d---------\n",
           ost->st->time_base.num, ost->st->time_base.den,
           c->time_base.num, c->time_base.den);

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}

static AVFrame *get_video_frame1(OutputStream *ost, IntputDev *input, int *got_pic)
{
    int ret, got_picture;
    AVCodecContext *c = ost->enc;
    AVFrame *ret_frame = NULL;

    /* Stop after STREAM_DURATION seconds of output. Report *got_pic = 1 so
     * that the caller hands the NULL frame to write_video_frame1(), which
     * then terminates the encoding loop. (The original code left *got_pic
     * untouched here and relied on its stale value.) */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) {
        *got_pic = 1;
        return NULL;
    }

    *got_pic = 0;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (av_read_frame(input->v_ifmtCtx, input->in_packet) >= 0) {
        if (input->in_packet->stream_index == input->videoindex) {
            ret = avcodec_decode_video2(input->pCodecCtx, input->pFrame, &got_picture, input->in_packet);
            *got_pic = got_picture;

            if (ret < 0) {
                printf("Decode Error.\n");
                av_free_packet(input->in_packet);
                return NULL;
            }
            if (got_picture) {
                /* convert the decoded camera frame into the encoder's YUV420P frame */
                sws_scale(input->img_convert_ctx, (const unsigned char* const*)input->pFrame->data, input->pFrame->linesize, 0, input->pCodecCtx->height, ost->frame->data, ost->frame->linesize);
                ost->frame->pts = ost->next_pts++;
                ret_frame = ost->frame;
            }
        }
        av_free_packet(input->in_packet);
    }
    return ret_frame;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program reads video frames from a camera, encodes them and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    //********add camera read***********//
    IntputDev video_input = { 0 };
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFormatContext *v_ifmtCtx;

    /* Register Device */
    avdevice_register_all();

    v_ifmtCtx = avformat_alloc_context();

    /* Linux: open the camera through the video4linux2 input device */
    AVInputFormat *ifmt = av_find_input_format("video4linux2");
    if (avformat_open_input(&v_ifmtCtx, "/dev/video0", ifmt, NULL) != 0) {
        printf("Couldn't open input stream /dev/video0\n");
        return -1;
    }

    if (avformat_find_stream_info(v_ifmtCtx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }

    int videoindex = -1;
    for (i = 0; i < v_ifmtCtx->nb_streams; i++)
        if (v_ifmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Couldn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = v_ifmtCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    AVFrame *pFrame, *pFrameYUV;
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    unsigned char *out_buffer = (unsigned char *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
    /* pFrameYUV/out_buffer are kept from the original capture article; in this
     * program the converted frame is written directly into ost->frame instead */

    printf("camera width=%d height=%d \n", pCodecCtx->width, pCodecCtx->height);

    struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    AVPacket *in_packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    video_input.img_convert_ctx = img_convert_ctx;
    video_input.in_packet = in_packet;
    video_input.pCodecCtx = pCodecCtx;
    video_input.pCodec = pCodec;
    video_input.v_ifmtCtx = v_ifmtCtx;
    video_input.videoindex = videoindex;
    video_input.pFrame = pFrame;
    video_input.pFrameYUV = pFrameYUV;
    //******************************//

    fmt = oc->oformat;

    /* Add the video stream using the default format codec
     * and initialize the codec. */
    printf("fmt->video_codec = %d\n", fmt->video_codec);

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }

    /* Now that all the parameters are set, we can open the
     * video codec and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    int got_pic;

    while (encode_video) {
        /* grab a decoded camera frame, then encode and mux it */
        AVFrame *frame = get_video_frame1(&video_st, &video_input, &got_pic);
        if (!got_pic) {
            usleep(10000);
            continue;
        }
        encode_video = !write_video_frame1(oc, &video_st, frame);
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    sws_freeContext(video_input.img_convert_ctx);

    avcodec_close(video_input.pCodecCtx);
    av_free(video_input.pFrameYUV);
    av_free(video_input.pFrame);
    avformat_close_input(&video_input.v_ifmtCtx);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
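
The timestamp handling above deserves a worked example. get_video_frame1() stamps each frame with a pts counted in the encoder time base of 1/25 (one tick per frame), and write_frame() calls av_packet_rescale_ts() to convert that into the stream time base the muxer actually chose. A small sketch of the same arithmetic, where the 1/12800 stream time base is only an illustrative value (the real one is set when avformat_write_header() runs):

#include <stdio.h>
#include <inttypes.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational codec_tb  = { 1, 25 };     /* encoder time base: one tick per frame */
    AVRational stream_tb = { 1, 12800 };  /* illustrative muxer time base */

    for (int64_t pts = 0; pts < 3; pts++)
        /* the same rescaling av_packet_rescale_ts() applies to pkt->pts/dts:
         * 0, 1, 2 in units of 1/25 s become 0, 512, 1024 in units of 1/12800 s */
        printf("frame pts=%"PRId64" -> stream pts=%"PRId64"\n",
               pts, av_rescale_q(pts, codec_tb, stream_tb));
    return 0;
}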

3. Verification

3.1 Build

#!/bin/sh
export PKG_CONFIG_PATH=/home/quange/ffmpeg_build/lib/pkgconfig/:$PKG_CONFIG_PATH
gcc ffmpeg_get_camera_muxing.c -g -o ffmpeg_get_camera_muxing.out -lSDLmain -lSDL `pkg-config "libavcodec" --cflags --libs` `pkg-config "libavformat" --cflags --libs` `pkg-config "libavutil" --cflags --libs` `pkg-config "libswscale" --cflags --libs` `pkg-config "libavdevice" --cflags --libs`
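
Assuming the build succeeds, recording roughly STREAM_DURATION (10 s) of camera video looks like, for example:

./ffmpeg_get_camera_muxing.out test.mp4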

3.2 Testing the result

Open test.mp4 with VLC; the recorded camera footage should play back.
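
You can also inspect the file with ffprobe test.mp4. Expect a single video stream of roughly 10 seconds (STREAM_DURATION). Whether it is H.264 depends on your build: the program uses the mp4 muxer's default video codec, which is H.264 only when FFmpeg was configured with libx264.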
