讀取筆記本的攝像頭的原始yuv數據,通過libav(ffmpeg編碼)

一、程序的邏輯主要分兩部分:

1、通過video4linux2讀取攝像頭的V4L2_PIX_FMT_YUYV格式的原始數據

2、把V4L2_PIX_FMT_YUYV格式的數據轉換成AV_PIX_FMT_YUV422P格式的yuv數據,並存放在AVFrame結構中;         把AVFrame結構送到編碼器;

      收取編碼後的h264數據流,並存到文件中

二、代碼中主要用到的庫:

1、通過一個叫uvccapture-0.5的庫(我直接改了裏面的代碼),獲取攝像頭的V4L2_PIX_FMT_YUYV格式數據。

2、通過libav編碼

三、主要代碼:

/*
 * main.cpp
 *
 *  Created on: 2014-3-2
 *      Author: xy
 */

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <linux/videodev2.h>
//ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
#ifdef __cplusplus
extern "C" {
#endif
#define __STDC_CONSTANT_MACROS
#ifdef _STDINT_H
#undef _STDINT_H
#endif
# include <stdint.h>
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>

#ifdef __cplusplus
}
#endif
//輸入設備
#include "v4l2uvc.h"
int main() {
	/*
	 * Capture raw V4L2_PIX_FMT_YUYV frames from a webcam through
	 * video4linux2 (uvccapture's v4l2uvc helper), repack each frame into
	 * an AV_PIX_FMT_YUV422P AVFrame, encode it with libav's H.264
	 * encoder and append the raw bitstream to "test.264".
	 *
	 * Returns 0 on success; exits with status 1 on any setup or
	 * encoding failure.
	 */

	/* v4l2 capture parameters */
	const char *videodevice = "/dev/video0";
	int width = 640;
	int height = 480;
	int format = V4L2_PIX_FMT_YUYV;
	struct vdIn *videoIn;
	int grabmethod = 1;		/* 1 = mmap-based capture */

	/* register all codecs (required by this libav API generation) */
	avcodec_register_all();

	AVCodec *codec;
	AVCodecContext *c = NULL;
	int i, ret, x, y, got_output;
	FILE *f;
	AVFrame *frame;
	AVPacket pkt;
	/* MPEG sequence end code, appended so players detect end of stream */
	uint8_t endcode[] = { 0, 0, 1, 0xb7 };
	char filename[] = "test.264";
	printf("Encode video file %s\n", filename);

	/* find the H.264 encoder (libx264 under the hood) */
	codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec) {
		fprintf(stderr, "Codec not found\n");
		exit(1);
	}
	c = avcodec_alloc_context3(codec);
	if (!c) {
		fprintf(stderr, "Could not allocate video codec context\n");
		exit(1);
	}
	/* encoder parameters */
	c->bit_rate = 400000;
	/* resolution must be a multiple of two */
	c->width = width;
	c->height = height;
	/* 10 frames per second */
	c->time_base = (AVRational) {1, 10};
	c->gop_size = 10;	/* emit one intra frame every ten frames */
	c->max_b_frames = 0;	/* no B frames */
	/* YUV 4:2:2 planar matches the YUYV data repacked below */
	c->pix_fmt = AV_PIX_FMT_YUV422P;

	/*
	 * Two settings keep libav from buffering encoded frames, i.e. they
	 * control latency:
	 * 1. the "zerolatency" tune below (the main one);
	 * 2. max_b_frames = 0 above — with no B frames encoding is fast and
	 *    no frame reordering delay is introduced.
	 */
	//av_opt_set(c->priv_data, "preset", "slow", 0);
	av_opt_set(c->priv_data, "tune", "zerolatency", 0);

	/* open the encoder */
	if (avcodec_open2(c, codec, NULL) < 0) {
		fprintf(stderr, "Could not open codec\n");
		exit(1);
	}

	f = fopen(filename, "wb");
	if (!f) {
		fprintf(stderr, "Could not open %s\n", filename);
		exit(1);
	}

	frame = av_frame_alloc();
	if (!frame) {
		fprintf(stderr, "Could not allocate video frame\n");
		exit(1);
	}
	frame->format = c->pix_fmt;
	frame->width = c->width;
	frame->height = c->height;

	/* the image can be allocated by any means; av_image_alloc() is just
	 * the most convenient way if av_malloc() is to be used */
	ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
			c->pix_fmt, 32);
	if (ret < 0) {
		fprintf(stderr, "Could not allocate raw picture buffer\n");
		exit(1);
	}

	/* v4l2 init */
	videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
	if (init_videoIn(videoIn, (char *) videodevice, width, height, format,
			grabmethod) < 0)
		exit(1);

	printf("w:%d,h:%d\n", c->width, c->height);
	time_t timep;
	timep = time(NULL);
	printf("%s\n", asctime(gmtime(&timep)));

	/* one YUYV source row holds width*2 bytes: Y0 U Y1 V per 2 pixels.
	 * Index the source by its OWN stride, not by frame->linesize[*] —
	 * the destination linesizes may be padded for alignment and only
	 * coincidentally match at 640x480. */
	const int src_stride = width * 2;

	for (i = 0; i < 100; i++) {
		/* grab one V4L2_PIX_FMT_YUYV frame from the camera */
		if (uvcGrab(videoIn) < 0) {
			fprintf(stderr, "Error grabbing\n");
			close_v4l2(videoIn);
			free(videoIn);
			exit(1);
		}
		unsigned char *yuyv = videoIn->framebuffer;

		/* hand an empty packet to the encoder */
		av_init_packet(&pkt);
		pkt.data = NULL; /* packet data will be allocated by the encoder */
		pkt.size = 0;

		/* deinterleave packed YUYV into the planar YUV422P frame */
		for (y = 0; y < c->height; y++) {
			/* luma: every even byte of the row */
			for (x = 0; x < c->width; x++) {
				frame->data[0][y * frame->linesize[0] + x] =
						yuyv[y * src_stride + 2 * x];
			}
			/* chroma: U at byte 1, V at byte 3 of each 4-byte pair */
			for (x = 0; x < c->width / 2; x++) {
				frame->data[1][y * frame->linesize[1] + x] =
						yuyv[y * src_stride + 4 * x + 1];
				frame->data[2][y * frame->linesize[2] + x] =
						yuyv[y * src_stride + 4 * x + 3];
			}
		}
		frame->pts = i;

		/* encode the image */
		ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
		if (ret < 0) {
			fprintf(stderr, "Error encoding frame\n");
			exit(1);
		}

		if (got_output) {
			printf("Write frame %3d (size=%5d)\n", i, pkt.size);
			fwrite(pkt.data, 1, pkt.size, f);
			av_free_packet(&pkt);
		}
	}

	/* drain delayed frames still buffered inside the encoder */
	for (got_output = 1; got_output; i++) {
		fflush(stdout);

		ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
		if (ret < 0) {
			fprintf(stderr, "Error encoding frame\n");
			exit(1);
		}

		if (got_output) {
			printf("Write frame %3d (size=%5d)\n", i, pkt.size);
			fwrite(pkt.data, 1, pkt.size, f);
			av_free_packet(&pkt);
		}
	}
	timep = time(NULL);
	printf("%s\n", asctime(gmtime(&timep)));

	/* add sequence end code to have a well-terminated stream */
	fwrite(endcode, 1, sizeof(endcode), f);
	fclose(f);

	avcodec_close(c);
	av_free(c);
	av_freep(&frame->data[0]);
	av_frame_free(&frame);

	/* release the capture device on the normal path too, not only on
	 * the grab-error path above */
	close_v4l2(videoIn);
	free(videoIn);
	return 0;
}

想運行代碼的朋友可以參考這篇文章:

http://blog.csdn.net/xyyangkun/article/details/20456725

編譯用到的libav庫

再通過修改工程中的makefile:

LIBAVDIR=/home/xy/mywork/av/libav-2014-03-02

指定libav庫的位置就可以了。

使用的讀攝像頭數據的代碼出處:

http://staticwave.ca/source/uvccapture/uvccapture-0.5.tar.bz2

庫的工程代碼在:

http://download.csdn.net/detail/xyyangkun/6990791


所在工程已上傳到github:

https://github.com/xyyangkun/read_encoder_sender

提交版本:2e60986a438e731aa53ca7d54bc492c521e7b5bc

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章