FFMPEG coping with webcam

Post by jiapei » Thu Sep 03, 2009 12:26 pm

Refer to

http://blog.csdn.net/dayongxie/archive/ ... 50736.aspx
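
The code from that post is reproduced below. It consists of three modules: a V4L2 camera wrapper (camera.h / camera.cpp), an encoder wrapper around libavcodec (encoder.h / encoder.cpp), and a main capture program (capture.cpp) that previews frames with SDL while encoding them to a file, plus a Makefile. Note that it is written against the old 2009-era ffmpeg API (av_open_input_file, AVFormatParameters, img_convert, avcodec_encode_video), so it will not build against modern FFmpeg without porting.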


Definition: camera.h

#ifndef _CAMERA_H_
#define _CAMERA_H_
extern "C"{
#include <ffmpeg/avformat.h>
}
// Camera parameter control IDs
enum CAMERA_CID{
CID_BRIGHTNESS, // brightness
CID_CONTRAST, // contrast
CID_COLOR, // color (saturation)
CID_HUE // hue
};

// Camera class
class Ccamera{
public:
char *m_standard; // video standard (PAL, NTSC, SECAM)
char *m_device; // device file name (e.g. "/dev/video0")
int m_channel; // channel number
int m_width; // frame width
int m_height; // frame height
enum PixelFormat m_pix_format; // capture pixel format
AVStream *video_st;
AVFormatContext *fmt_ctx;
AVInputFormat *iformat;
private:
int m_fd; // file descriptor of the opened device
public:
Ccamera();
~Ccamera();

AVStream* open_stream(); // open the device
int close(); // close the device
int control(enum CAMERA_CID ctrlID, int value);
// adjust camera parameters (brightness, etc.)
};

#endif
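
As a rough usage sketch, the class is used by filling in the public fields, calling open_stream(), and optionally adjusting controls before reading frames from fmt_ctx. The short main() below is hypothetical; the real driver, capture.cpp, appears at the end of the post.

// Hypothetical minimal driver for Ccamera, following the same pattern as the
// real program (capture.cpp) shown further down.
#include "camera.h"

int main()
{
    av_register_all();                     // registers demuxers, incl. video4linux2
    Ccamera cam;                           // constructor looks up the v4l2 input format
    cam.m_device     = (char*)"/dev/video0";
    cam.m_standard   = (char*)"ntsc";
    cam.m_pix_format = PIX_FMT_YUV420P;

    AVStream *st = cam.open_stream();      // wraps av_open_input_file on the device
    if (st == NULL)
        return -1;

    cam.control(CID_BRIGHTNESS, 128);      // forwarded to the VIDIOC_S_CTRL ioctl
    cam.close();
    return 0;
}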

Implementation: camera.cpp

#include <iostream>

extern "C"{
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <ffmpeg/avcodec.h>
#include <ffmpeg/avformat.h>
}

#include "camera.h"

void my_log(const char *msg){
perror(msg);
}


enum io_method {
io_read,
io_mmap,
io_userptr
};
//This struct mirrors the private data of ffmpeg's video4linux2 demuxer; it is declared here only so the opened device file descriptor (fd) wrapped by ffmpeg can be extracted

struct video_data{
int fd;
int frame_format;
enum io_method io_method;
int width, height;
int frame_rate;
int frame_rate_base;
int frame_size;
int top_field_first;

int buffers;
void **buf_start;
unsigned int *buf_len;
};

static int xioctl(int fd, int request, void *arg)
{
int ret;
do ret = ioctl(fd, request, arg);
while (-1 == ret && EINTR == errno);
return ret;
}

struct cid_map{
enum CAMERA_CID cid;
uint32_t v4l2_cid;
};

static struct cid_map cid_conversion_table[] = {
{
CID_BRIGHTNESS,
V4L2_CID_BRIGHTNESS,
},
{
CID_CONTRAST,
V4L2_CID_CONTRAST,
},
{
CID_COLOR,
V4L2_CID_SATURATION,
},
{
CID_HUE,
V4L2_CID_HUE,
},
};

static uint32_t cid_v4l2(enum CAMERA_CID ctrlID)
{
int i;
int table_size = sizeof(cid_conversion_table)/sizeof(struct cid_map);
for (i = 0; i < table_size; i++){
if(cid_conversion_table[i].cid == ctrlID){
return cid_conversion_table[i].v4l2_cid;
}
}
return 0;
}

Ccamera::Ccamera()
{
m_channel = 1;
m_width = 640;
m_height = 480;
m_standard = NULL;
m_device = NULL;
m_fd = -1;
fmt_ctx = NULL;
video_st = NULL;
iformat = av_find_input_format("video4linux2");
}

Ccamera::~Ccamera()
{
if(video_st != NULL)
close();
}

AVStream* Ccamera::open_stream()
{
AVFormatParameters formatParams;
memset(&formatParams, 0, sizeof(formatParams)); // zero the fields we do not set
formatParams.channel = m_channel;
formatParams.standard = m_standard;
formatParams.width = m_width;
formatParams.height = m_height;
formatParams.time_base.num = 1;
formatParams.time_base.den = 30;
formatParams.pix_fmt = m_pix_format;

if(-1 == av_open_input_file(&fmt_ctx, m_device, iformat, 0, &formatParams)){
my_log("Camera open: open input file");
return NULL;
}
#ifdef DEBUG
dump_format(fmt_ctx, 0, m_device, 0);
#endif
// pull the V4L2 file descriptor out of the demuxer's private data
struct video_data *s = (struct video_data*)fmt_ctx->priv_data;
m_fd = s->fd;

video_st = fmt_ctx->streams[0];
return video_st;
}

int Ccamera::close()
{
av_close_input_file(fmt_ctx);
video_st = NULL;
return 0;
}

int Ccamera::control(enum CAMERA_CID ctrlID, int value)
{
struct v4l2_queryctrl queryctrl;
struct v4l2_control control;
uint32_t v4l2_cid = cid_v4l2(ctrlID);

memset(&queryctrl, 0, sizeof(queryctrl));
queryctrl.id = v4l2_cid;
if(-1 == xioctl(m_fd, VIDIOC_QUERYCTRL, &queryctrl)){
if(errno != EINVAL){
my_log("Camera control: VIDIOC_QUERYCTRL");
}else{
my_log("Camera control: unsupported cid");
}
return -1;
}else if(queryctrl.flags & V4L2_CTRL_FLAG_DISABLED){
my_log("Camera control: unsupported cid");
return -1;
}else{
memset(&control, 0, sizeof(control));
control.id = v4l2_cid;
if( 0 == xioctl(m_fd, VIDIOC_G_CTRL, &control)){
control.value = value;
if(-1 == xioctl(m_fd, VIDIOC_S_CTRL, &control) && errno != ERANGE){
my_log("Camera control: VIDIOC_S_CTRL");
return -1;
}
}else if(errno != EINVAL){
my_log("Camera control: VIDIOC_G_CTRL");
return -1;
}
}
return 0;
}


Encoder module:

Definition: encoder.h

#ifndef _ENCODER_H_
#define _ENCODER_H_
extern "C"{
#include <ffmpeg/avformat.h>
#include <ffmpeg/avcodec.h>
}

class Cencoder
{
public:
int width;
int height;
int bit_rate;
enum PixelFormat pix_fmt;
AVRational time_base;
char *coder_fmt;
char *filename;
int max_b_frames;
private:
AVFormatContext *pFormatCtx;
AVOutputFormat *pOutputFmt;
AVStream *video_st;
int out_size;
uint8_t *video_outbuf;
int video_outbuf_size;
public:
Cencoder();
~Cencoder();
int open();
int encode(AVFrame *pFrame);
};

#endif
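
A minimal sketch of the intended workflow: set the output parameters, call open() once, then call encode() for every decoded frame. The helper below is hypothetical; the real loop lives in capture.cpp further down.

// Hypothetical helper showing the Cencoder call sequence, written against the
// same old ffmpeg API as the rest of this post.
#include "encoder.h"

int record_frames(AVFrame **frames, int n)
{
    // av_register_all() must already have been called by the application.
    Cencoder enc;
    enc.width = 320;
    enc.height = 240;
    enc.bit_rate = 500000;
    enc.time_base.num = 1;
    enc.time_base.den = 30;              // 30 fps
    if (enc.open() < 0)                  // codec is chosen from the output file name
        return -1;
    for (int i = 0; i < n; i++)
        enc.encode(frames[i]);           // writes one packet per call
    return 0;                            // destructor writes the trailer and closes the file
}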

Implementation: encoder.cpp

#include <iostream>

extern "C"{
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <ffmpeg/avformat.h>
#include <ffmpeg/avcodec.h>
}

#include "encoder.h"

using namespace std;

Cencoder::Cencoder()
{
width = 640;
height = 480;
bit_rate = 200000;
time_base.den = 15;
time_base.num = 1;
pix_fmt = PIX_FMT_YUV420P;
filename = (char*)"cap.m4v";
video_outbuf_size = 400000;
video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);
max_b_frames = 2;
pFormatCtx = NULL;
pOutputFmt = NULL;
video_st = NULL;
}

Cencoder::~Cencoder()
{
if(video_st){
avcodec_close(video_st->codec);
}
if(pFormatCtx){
av_write_trailer(pFormatCtx);
av_freep(&video_st);
if(!(pOutputFmt->flags & AVFMT_NOFILE)){
url_fclose(&pFormatCtx->pb);
}
av_free(pFormatCtx);
}
av_free(video_outbuf);
}

int Cencoder::open()
{
pFormatCtx = av_alloc_format_context();
if(NULL == pFormatCtx)
return -1;
// pOutputFmt points to a registered format; it must not be freed
pOutputFmt = guess_format(NULL, filename, NULL);
if(NULL == pOutputFmt){
perror("guess format");
av_free(pFormatCtx);
return -1;
}
if(CODEC_ID_NONE == pOutputFmt->video_codec){
av_free(pFormatCtx);
return -1;
}
pFormatCtx->oformat = pOutputFmt;
snprintf(pFormatCtx->filename, sizeof(pFormatCtx->filename), "%s", filename);

AVCodecContext *c;
video_st = av_new_stream(pFormatCtx, 0);
if(NULL == video_st){
perror("alloc stream");
return -1;
}
c = video_st->codec;
c->codec_id = pOutputFmt->video_codec;
c->codec_type = CODEC_TYPE_VIDEO;
c->bit_rate = bit_rate;
c->width = width;
c->height = height;
c->gop_size = 12;
c->pix_fmt = pix_fmt;
c->time_base.den = time_base.den;
c->time_base.num = time_base.num;
c->max_b_frames = max_b_frames;
// some containers want the codec extradata in a global header
if(!strcmp(pOutputFmt->name, "mp4") || !strcmp(pOutputFmt->name, "mov") || !strcmp(pOutputFmt->name, "3gp"))
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
if(av_set_parameters(pFormatCtx, NULL) < 0){
perror("set parameter");
return -1;
}
dump_format(pFormatCtx, 0, filename, 1);

AVCodec *codec;
codec = avcodec_find_encoder(c->codec_id);
if(NULL == codec){
perror("find encoder");
return -1;
}
if(avcodec_open(c, codec) < 0){
perror("open codec");
return -1;
}
if(!(pOutputFmt->flags & AVFMT_NOFILE)){
if(url_fopen(&pFormatCtx->pb, filename, URL_WRONLY) < 0){
perror("open file");
return -1;
}
}
av_write_header(pFormatCtx);
return 0;
}
/*
int Cencoder::convert(AVPacket *pkt, AVFormatContext *ic)
{
AVPacket packet;
if(pOutputFmt->video_codec == ic->streams[pkt->stream_index]->codec->codec_id){
packet = *pkt;
packet.data = malloc(pkt->size);
packet.size = pkt->size;
ret = av_write_frame(pFormatCtx, &packet);
}else{
av_init_packet(&packet);
*/

int Cencoder::encode(AVFrame *pFrame)
{
int ret = 0;
AVCodecContext *c;
c = video_st->codec;
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, pFrame);
if(out_size > 0){
AVPacket pkt;
av_init_packet(&pkt);
if(c->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index = video_st->index;
pkt.data = video_outbuf;
pkt.size = out_size;
ret = av_write_frame(pFormatCtx, &pkt);
}
return ret;
}

Main program: capture.cpp

#include <iostream>
extern "C"{
#include <errno.h>
#include <SDL.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <ffmpeg/avcodec.h>
#include <ffmpeg/avformat.h>
}
#include "camera.h"
#include "encoder.h"
#define WIDTH 320
#define HEIGHT 240
const int drops = 1;
using namespace std;
int main()
{
av_register_all();
Cencoder encoder;
encoder.width = WIDTH;
encoder.height = HEIGHT;
encoder.time_base.den = 30;
encoder.time_base.num = 1;
encoder.bit_rate = 500000;
encoder.max_b_frames = 2;
Ccamera camera;
if(camera.iformat == NULL){
perror("create camera");
return -1;
}
camera.m_channel = 1;
camera.m_standard = "ntsc";
camera.m_pix_format = PIX_FMT_YUV420P;
camera.m_device = "/dev/video0";
camera.m_width = WIDTH;
camera.m_height = HEIGHT;

AVStream *video_st;
video_st = camera.open_stream();
if(video_st == NULL){
perror("open camera stream");
return -1;
}
AVCodecContext *pCodecCtx;
pCodecCtx = video_st->codec;
AVCodec *pCodec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec == NULL){
perror("find decoder");
return -1;
}
if(avcodec_open(pCodecCtx, pCodec)<0){
perror("open decoder");
return -1;
}
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER)){
perror("SDL_Init");
return -1;
}
SDL_Surface *screen;
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
if(!screen){
perror("set screen mode");
return -1;
}
if(-1 == encoder.open())
exit(-1);
AVFrame *pFrame, *pFrameOut;
pFrame = avcodec_alloc_frame();
if(pFrame == NULL)
return -1;
pFrameOut = avcodec_alloc_frame();
if(pFrameOut == NULL)
return -1;

SDL_Overlay *bmp;
bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
SDL_YV12_OVERLAY, screen);

uint8_t *buffer;
int frameFinished;
AVPicture pict;
AVPacket packet;
struct v4l2_queryctrl queryctrl;
struct v4l2_control control;
struct v4l2_standard standard;
buffer = (uint8_t*)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, WIDTH, HEIGHT));
avpicture_fill((AVPicture*)pFrameOut, buffer, PIX_FMT_YUV420P, WIDTH, HEIGHT);
int incr = 0;
int stdid = 0;
int i=0;
int j=0;
while(av_read_frame(camera.fmt_ctx, &packet)>=0) {
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
if(frameFinished) {
SDL_LockYUVOverlay(bmp);
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];

pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];

img_convert(&pict, PIX_FMT_YUV420P,
(AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
if(++i == drops){
pFrameOut->data[0] = pict.data[0];
pFrameOut->data[1] = pict.data[1];
pFrameOut->data[2] = pict.data[2];
pFrameOut->data[3] = pict.data[3];
pFrameOut->linesize[0] = pict.linesize[0];
pFrameOut->linesize[1] = pict.linesize[1];
pFrameOut->linesize[2] = pict.linesize[2];
pFrameOut->linesize[3] = pict.linesize[3];
encoder.encode(pFrameOut);
i = 0;
}
SDL_UnlockYUVOverlay(bmp);
SDL_Rect rect;
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;
SDL_DisplayYUVOverlay(bmp, &rect);
}
av_free_packet(&packet);

SDL_Event event;
SDL_PollEvent(&event);
switch(event.type){
case SDL_KEYDOWN:
switch (event.key.keysym.sym){
case SDLK_p:
break;
case SDLK_n:
break;
case SDLK_q:
SDL_Quit();
return 0;
break;
case SDLK_LEFT:
incr = -256;
break;
case SDLK_RIGHT:
incr = 256;
break;
default:
incr = 0;
break;
}
if(incr < 0){
camera.control(CID_BRIGHTNESS, 0);
}
break;
case SDL_QUIT:
SDL_Quit();
return 0;
break;
default:
break;
}
}
av_free(buffer);
av_free(pFrameOut);
av_free(pFrame);
avcodec_close(pCodecCtx);
return 0;
}


Makefile:

CXX := g++

LIBS := -L./libs -lavformat -lavcodec -lavutil `sdl-config --libs`
INCLUDES := `sdl-config --cflags`

CFLAGS := -g

TARGET := capture

OBJS := camera.o $(TARGET).o encoder.o

$(TARGET) : $(OBJS)
	$(CXX) -o $@ $(OBJS) $(LIBS)

%.o : %.cpp
	$(CXX) -c -o $@ $< $(CFLAGS) $(INCLUDES)

clean :
	rm *.o $(TARGET)
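
With this layout (the three .cpp files and two headers in one directory, the ffmpeg headers reachable as <ffmpeg/...>, and the ffmpeg libraries in ./libs), building and running the demo should be just:

make
./capture

Remember that the recipe lines of a Makefile must start with a tab character. The encoded output lands in cap.m4v in the working directory, and the SDL window shows the live preview.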

The program was written with reference to ffmpeg's outexample and ffplay sample programs; thanks to them!