FFMPEG音視頻開發: Linux下采集音頻與視頻實時同步編碼保存爲MP4文件(視頻錄製)

一、環境介紹

操作系統介紹:ubuntu 18.04

FFMPEG版本: 4.2.2(注意:文末 Makefile 中引用的是 ffmpeg-4.2.2 的安裝目錄,請以實際安裝版本爲準)

攝像頭:  USB攝像頭、虛擬機掛載本機自帶攝像頭

二、FFMPEG、X264庫安裝

參考這篇文章: https://blog.csdn.net/xiaolong1126626497/article/details/104919095

三、代碼思路介紹

代碼裏包含了3個線程:

(1)  攝像頭數據採集線程  

(2)  音頻採集線程

(3)  音頻、視頻編碼線程

攝像頭採集代碼: 使用Linux下標準V4L2框架讀取數據。

聲卡採集代碼:  使用alsa-lib庫獲取聲卡數據。

關於alsa-lib庫的使用,參考這裏:https://blog.csdn.net/xiaolong1126626497/article/details/105368195

音頻視頻編碼部分:編碼部分使用FFMPEG庫的API接口實現。

代碼裏音頻採集部分有一個存放音頻數據的鏈表,音頻採集線程採集數據之後,將數據插入到鏈表裏,編碼線程再從鏈表裏取數據,保護機制採用的互斥鎖。視頻採集部分使用一個全局的緩衝區存放。因爲視頻編碼採集比較耗時,爲了保證音頻能夠實時,這裏必須採用緩衝區的形式存放音頻數據。

四、核心代碼

爲了方便大家複製粘貼,我這裏的代碼全部放到一個.c文件裏。

代碼裏默認以10秒爲間隔保存一個視頻,視頻名稱以當前時間日期命名。

音頻的採樣率44100,單聲道、16位

視頻1秒15幀

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>
#include <string.h>

#include <stdio.h>
#include <stdlib.h>
#include <alsa/asoundlib.h>
#include <signal.h>
#include <pthread.h>

#define STREAM_DURATION   10.0   /* seconds of A/V per output file; buffering typically yields ~8s of real content */
#define STREAM_FRAME_RATE 15     /* 15 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC

// Fixed camera output picture size
#define VIDEO_WIDTH  640
#define VIDEO_HEIGHT 480

// Shared buffers holding the YUV420P image converted from the camera's YUYV output
unsigned char YUV420P_Buffer[VIDEO_WIDTH*VIDEO_HEIGHT*3/2];
unsigned char YUV420P_Buffer_temp[VIDEO_WIDTH*VIDEO_HEIGHT*3/2];

/* Globals used by the camera (V4L2) capture code */
unsigned char *image_buffer[4];  // mmap'ed V4L2 capture buffers
int video_fd;                    // camera device file descriptor
pthread_mutex_t mutex;           // protects YUV420P_Buffer
pthread_cond_t cond;             // signaled each time a fresh camera frame lands in YUV420P_Buffer

/* Globals used by the audio capture code */
pthread_mutex_t mutex_audio;     // protects the PCM linked list below

extern int capture_audio_data_init( char *audio_dev);
extern int capture_audio_data(snd_pcm_t *capture_handle,int buffer_frames);
/*
 Raw PCM capture parameters:
	 channels:     1 (mono — AUDIO_CHANNEL_SET below; an earlier comment wrongly said 2)
	 sample size:  16 bit, little-endian
	 sample rate:  44100 Hz
*/
#define AudioFormat SND_PCM_FORMAT_S16_LE  // sample format; other common ones: SND_PCM_FORMAT_U24_LE, SND_PCM_FORMAT_U32_LE
#define AUDIO_CHANNEL_SET   1  			  // 1 = mono, 2 = stereo
#define AUDIO_RATE_SET 44100   // sample rate; common rates: 44100, 16000, 8000, 48000, 22050 Hz
FILE *pcm_data_file=NULL;  // NOTE(review): never fopen'ed anywhere in this file — see capture_audio_data()

int buffer_frames;              // frames per ALSA read (set to 1024 in capture_audio_data_init)
snd_pcm_t *capture_handle;      // ALSA PCM capture handle
snd_pcm_format_t format=AudioFormat;


// Singly linked list node holding one captured PCM chunk
struct AUDIO_DATA
{
	unsigned char* audio_buffer;  // heap buffer owned by the node; freed by the consumer (get_audio_frame)
	struct AUDIO_DATA *next;
};

// List head (dummy node: real data starts at list_head->next)
struct AUDIO_DATA *list_head=NULL;
struct AUDIO_DATA *List_CreateHead(struct AUDIO_DATA *head);
void List_AddNode(struct AUDIO_DATA *head,unsigned char* audio_buffer);
void List_DelNode(struct AUDIO_DATA *head,unsigned char* audio_buffer);
int List_GetNodeCnt(struct AUDIO_DATA *head);

// Wrapper around a single output AVStream and its encoder state
typedef struct OutputStream {
    AVStream *st;         // muxer stream
    AVCodecContext *enc;  // encoder context

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;    // running count of audio samples written (drives audio pts)

    AVFrame *frame;       // frame in the encoder's native format
    AVFrame *tmp_frame;   // intermediate frame (interleaved S16 audio) before resampling

    float t, tincr, tincr2;  // phase state for the synthetic test tone (unused once real PCM is fed in)

    struct SwsContext *sws_ctx;  // video scaler context (allocated nowhere in this file)
    struct SwrContext *swr_ctx;  // audio resampler: S16 -> encoder sample_fmt
} OutputStream;


/*
 Rescale the packet's timestamps from the encoder time base to the stream
 time base, tag the packet with its stream index, and hand it to the muxer.
 Returns the av_interleaved_write_frame() result (0 on success).
*/
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    pkt->stream_index = st->index;
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}


/*
 Add an output stream for the given codec id to the muxer context:
 find the encoder, create the AVStream and an AVCodecContext, and fill in
 the default encoding parameters this recorder uses.
 Exits on failure (consistent with the rest of this file).
*/
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;  /* audio bitrate */
        c->sample_rate = 44100;  /* audio sample rate */
        /* BUGFIX: derive the channel count only AFTER setting the layout.
         * The original called av_get_channel_layout_nb_channels() on the
         * still-unset channel_layout first, then overwrote the result. */
        c->channel_layout  = AV_CH_LAYOUT_MONO; /* AV_CH_LAYOUT_STEREO for stereo */
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        /* Bitrate drives output size: bigger bitrate -> bigger file. */
        c->bit_rate = 400000; /* 400 kbps */
        /* Resolution must be a multiple of 2. */
        c->width    = VIDEO_WIDTH;
        c->height   = VIDEO_HEIGHT;
        /* Time base: the fundamental unit of time (in seconds) in which
         * frame timestamps are expressed. For fixed-fps content the time
         * base is 1/framerate and timestamps increment by 1 per frame. */
        ost->st->time_base = (AVRational){1,STREAM_FRAME_RATE};
        c->time_base       = ost->st->time_base;
        c->gop_size      = 12; /* emit one intra frame at most every 12 frames */
        c->pix_fmt       = STREAM_PIX_FMT;
        c->max_b_frames = 0;  /* no B frames */
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        {
            /* needed to avoid macroblocks in which some coeffs overflow */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* audio output */

/*
 Allocate an audio AVFrame with the given format/layout/rate and, when
 nb_samples > 0, its sample buffers.
 BUGFIX: the original ignored both av_frame_alloc() and
 av_frame_get_buffer() failures, which would crash later on a NULL frame
 or unallocated data pointers. Exits on failure (file convention).
*/
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }
    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;
    if (nb_samples)
    {
        if (av_frame_get_buffer(frame, 0) < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }
    return frame;
}

/*
 Open the audio encoder, allocate the two reusable frames (encoder-format
 and interleaved S16), copy the codec parameters to the muxer stream, and
 set up the S16 -> encoder-format resampler.
 BUGFIX: the original ignored the avcodec_open2(), swr_alloc() and
 swr_init() results; any failure there would crash later. Exits on error.
*/
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;
    c = ost->enc;
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* Frequency parameters for the synthetic fallback tone (unused once
       real PCM from the sound card is fed in). */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    /* AAC uses a fixed frame size of 1024 samples */
    nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    avcodec_parameters_from_context(ost->st->codecpar, c);

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* input is interleaved S16 from ALSA; output is the encoder's format */
    printf("c->channels=%d\n",c->channels);
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

    /* initialize the resampling context */
    if (swr_init(ost->swr_ctx) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}

/*
 Millisecond-level sleep built on select() with no fds.
 BUGFIX: the original stored the whole delay in tv_usec; for ms >= 1000
 this exceeds the 0..999999 range POSIX allows for tv_usec, and select()
 may fail with EINVAL (no delay at all). Split into seconds + microseconds.
*/
void Sleep(int ms)
{
	struct timeval delay;
	delay.tv_sec = ms / 1000;
	delay.tv_usec = (ms % 1000) * 1000;
	select(0, NULL, NULL, NULL, &delay);
}


/*
 Fetch one frame's worth of PCM samples for the audio encoder.
 The samples come from the linked list filled by the audio capture thread
 (this replaces the synthetic-tone generator of the stock FFmpeg example).
 Returns NULL once STREAM_DURATION has been reached.
*/
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];
    /* Stop producing frames once the requested duration is reached */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

   #if 1
	// Busy-wait until the capture thread has queued at least one buffer.
	// NOTE(review): this spins at 100% CPU and reads the list WITHOUT the
	// mutex held — racy; a condition variable would be the proper fix.
	int cnt=0;
	while(cnt<=0)
	{
		cnt=List_GetNodeCnt(list_head);
	}
	
	pthread_mutex_lock(&mutex_audio); /* lock the audio list */

	// Take the first data node (list_head itself is a dummy node)
	struct AUDIO_DATA *tmp=list_head;
	unsigned char *buffer;

	tmp=tmp->next;
	if(tmp==NULL)
	{
		printf("數據爲NULL.\n");
		exit(0);
	}
	buffer=tmp->audio_buffer;
	
	// 1024 samples * 2 bytes * 1 channel = 2048 bytes per frame
	memcpy(q,buffer,frame->nb_samples*sizeof(int16_t)*ost->enc->channels); // copy PCM into the frame buffer
	
	List_DelNode(list_head,buffer);
	free(buffer);			
    pthread_mutex_unlock(&mutex_audio); /* unlock */
	#endif
	
    frame->pts = ost->next_pts;
    ost->next_pts  += frame->nb_samples;
    return frame;
}


/*
 Encode one audio frame and send it to the muxer.
 Returns 0 while encoding continues, 1 when the stream is finished
 (no more input frames and nothing left in the encoder).
 BUGFIX: swr_convert()/avcodec_encode_audio2()/write_frame() results were
 previously ignored; failures now terminate with a diagnostic.
*/
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 };
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame)
    {
        /* Convert the S16 samples to the codec's sample format.
           Compute the destination sample count; with equal in/out rates
           it must match the input count exactly. */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        /* The encoder may hold a reference to ost->frame internally. */
        av_frame_make_writable(ost->frame);
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting audio\n");
            exit(1);
        }
        frame = ost->frame;
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    /* frame == NULL flushes the encoder */
    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet)
    {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n", av_err2str(ret));
            exit(1);
        }
    }
    return (frame || got_packet) ? 0 : 1;
}


/*
 Allocate a video AVFrame plus its pixel buffers (32-byte aligned).
 BUGFIX: the original ignored av_frame_alloc()/av_frame_get_buffer()
 failures and carried an unused `ret` local. Exits on failure.
*/
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture = av_frame_alloc();
    if (!picture) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    if (av_frame_get_buffer(picture, 32) < 0) {
        fprintf(stderr, "Could not allocate video frame data\n");
        exit(1);
    }
    return picture;
}


/*
 Open the video encoder, allocate the reusable picture frame and copy the
 codec parameters to the muxer stream.
 BUGFIX: the avcodec_open2() result was ignored; a failed open (e.g. x264
 missing) crashed later in the encode loop. Exits on failure.
*/
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;
    int ret;

    av_dict_copy(&opt, opt_arg, 0);
    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }
    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    ost->tmp_frame = NULL;
    /* copy the stream parameters to the muxer */
    if (avcodec_parameters_from_context(ost->st->codecpar, c) < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}


/*
 Copy the latest camera frame (YUV420P) into the encoder's picture planes.
 Sizes: YUV422 frame = w*h*2 bytes; YUV420 frame = w*h*3/2 bytes.
 frame_index is unused here (the real camera image replaces the synthetic
 pattern of the stock FFmpeg example).
 NOTE(review): pthread_cond_wait() is used without a predicate flag, so a
 signal sent before we start waiting is lost and spurious wakeups are not
 filtered. In practice the 15 fps capture thread keeps broadcasting, but a
 "frame ready" flag checked in a while-loop would be the robust fix.
*/
static void fill_yuv_image(AVFrame *pict, int frame_index,int width, int height)
{
	int y_size=width*height;
	/* Block until the capture thread publishes a new frame */
	pthread_mutex_lock(&mutex);
    pthread_cond_wait(&cond,&mutex);
	memcpy(YUV420P_Buffer_temp,YUV420P_Buffer,sizeof(YUV420P_Buffer));
	/* unlock */
	pthread_mutex_unlock(&mutex);
	
    // Copy the planes into the AVFrame: Y (w*h), then U and V (w*h/4 each)
	memcpy(pict->data[0],YUV420P_Buffer_temp,y_size);
	memcpy(pict->data[1],YUV420P_Buffer_temp+y_size,y_size/4);
	memcpy(pict->data[2],YUV420P_Buffer_temp+y_size+y_size/4,y_size/4);
}


/*
 Produce the next video frame for the encoder, or NULL once the recording
 duration (STREAM_DURATION) has been reached.
*/
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* Stop producing frames once the requested duration is reached. */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* The encoder may keep an internal reference to the frame we hand it,
       so make sure the buffers are writable before touching them. */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    /* Pull the latest camera picture into the frame and stamp its PTS
       (DTS equals PTS here since B-frames are disabled). */
    fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    ost->frame->pts = ost->next_pts;
    ost->next_pts += 1;
    return ost->frame;
}

/*
 Encode one video frame and send it to the muxer.
 Returns 0 while encoding continues, 1 when the stream is finished
 (no more frames and nothing left in the encoder).
*/
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *enc = ost->enc;
    AVPacket pkt = { 0 };
    AVFrame *frame;
    int got_packet = 0;
    int ret;

    /* Grab the next camera frame; NULL flushes the encoder at the end. */
    frame = get_video_frame(ost);
    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(enc, &pkt, frame, &got_packet);

    ret = got_packet ? write_frame(oc, &enc->time_base, ost->st, &pkt) : 0;

    return (frame || got_packet) ? 0 : 1;
}


/* Release everything an OutputStream owns: encoder context, frames, and
   the scaler/resampler contexts (all free functions accept NULL/empty). */
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    swr_free(&ost->swr_ctx);
    sws_freeContext(ost->sws_ctx);
    av_frame_free(&ost->tmp_frame);
    av_frame_free(&ost->frame);
    avcodec_free_context(&ost->enc);
}


// Record one MP4 file: set up muxer + A/V encoders, interleave encoded
// audio/video until STREAM_DURATION is reached, then finalize the file.
int video_audio_encode(char *filename)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    /* Allocate the output context; the container is deduced from the
       filename extension (.mp4).
       NOTE(review): oc is not checked for NULL before use. */
    avformat_alloc_output_context2(&oc,NULL,NULL,filename);
    fmt=oc->oformat;
	
     /* Add audio and video streams using the container's default codecs
        and initialize the encoder contexts. */
    if(fmt->video_codec != AV_CODEC_ID_NONE)
	{
        add_stream(&video_st,oc,&video_codec,fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if(fmt->audio_codec != AV_CODEC_ID_NONE)
	{
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

  /* All parameters set: open the codecs and allocate encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* Open the output file, if the container format needs one */
    if(!(fmt->flags & AVFMT_NOFILE)) 
	{
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0)
		{
            fprintf(stderr, "無法打開輸出文件: '%s': %s\n", filename,av_err2str(ret));
            return 1;
        }
    }

    /* Write the container header.
       NOTE(review): the return value is unchecked — a failure here would
       leave the muxer in an invalid state. */
    avformat_write_header(oc,&opt);

    while(encode_video || encode_audio)
	{
        /* Interleave fairly: encode whichever stream has the smaller next
           pts (compared across their respective time bases). */
        if(encode_video &&(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,audio_st.next_pts, audio_st.enc->time_base) <= 0))
        {
            encode_video = !write_video_frame(oc,&video_st);
        }
		else 
		{
            encode_audio = !write_audio_frame(oc,&audio_st);
        }
    }
	
    /* Finalize the file (writes the moov atom for MP4) */
    av_write_trailer(oc);
	
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);
    avformat_free_context(oc);
    return 0;
}


/*
 Initialize the V4L2 camera:
 open the device, set the YUYV 640x480 capture format, request and mmap
 up to 4 capture buffers, queue them, and start streaming.
 Returns 0 on success or a negative step-specific code on failure.
 BUGFIXES: mmap() failure is MAP_FAILED (not NULL) and was unchecked;
 pointers were printed with %X (truncates/UB on 64-bit — use %p); the
 driver-adjusted buffer count is now clamped to the image_buffer[] size.
*/
int VideoDeviceInit(char *DEVICE_NAME)
{
	/* 1. open the camera device */
	video_fd=open(DEVICE_NAME,O_RDWR);
	if(video_fd<0)return -1;

	/* 2. set the pixel format and the capture size */
	struct v4l2_format video_formt;
	memset(&video_formt,0,sizeof(struct v4l2_format));
	video_formt.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	video_formt.fmt.pix.height=VIDEO_HEIGHT; //480
	video_formt.fmt.pix.width=VIDEO_WIDTH;   //640
	video_formt.fmt.pix.pixelformat=V4L2_PIX_FMT_YUYV;
	if(ioctl(video_fd,VIDIOC_S_FMT,&video_formt))return -2;
	printf("當前攝像頭尺寸:width*height=%d*%d\n",video_formt.fmt.pix.width,video_formt.fmt.pix.height);

	/* 3. request capture buffers (driver may adjust the count) */
	struct v4l2_requestbuffers video_requestbuffers;
	memset(&video_requestbuffers,0,sizeof(struct v4l2_requestbuffers));
	video_requestbuffers.count=4;
	video_requestbuffers.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	video_requestbuffers.memory=V4L2_MEMORY_MMAP;
	if(ioctl(video_fd,VIDIOC_REQBUFS,&video_requestbuffers))return -3;
	if(video_requestbuffers.count>4)video_requestbuffers.count=4; /* clamp to image_buffer[] size */
	printf("video_requestbuffers.count=%d\n",video_requestbuffers.count);

	/* 4. query and mmap each buffer */
	struct v4l2_buffer video_buffer;
	memset(&video_buffer,0,sizeof(struct v4l2_buffer));
	int i;
	for(i=0;i<video_requestbuffers.count;i++)
	{
		video_buffer.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
		video_buffer.memory=V4L2_MEMORY_MMAP;
		video_buffer.index=i; /* buffer index */
		if(ioctl(video_fd,VIDIOC_QUERYBUF,&video_buffer))return -4;
		/* map the driver buffer into our address space */
		image_buffer[i]=mmap(NULL,video_buffer.length,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,video_buffer.m.offset);
		if(image_buffer[i]==MAP_FAILED)return -7;
		printf("image_buffer[%d]=%p\n",i,(void*)image_buffer[i]);
	}
	/* 5. enqueue all buffers into the capture queue */
	memset(&video_buffer,0,sizeof(struct v4l2_buffer));
	for(i=0;i<video_requestbuffers.count;i++)
	{
		video_buffer.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
		video_buffer.memory=V4L2_MEMORY_MMAP;
		video_buffer.index=i;
		if(ioctl(video_fd,VIDIOC_QBUF,&video_buffer))return -5;
	}
	/* 6. start streaming */
	int opt=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if(ioctl(video_fd,VIDIOC_STREAMON,&opt))return -6;
	return 0;
}


/*
 Convert one packed YUYV (YUV 4:2:2) image into planar YUV420P.
 Output layout: Y plane (w*h bytes), then U plane (w*h/4), then V (w*h/4).
 Vertical chroma subsampling keeps the chroma of even-numbered rows only.
 Always returns 1.
*/
int yuyv_to_yuv420p(const unsigned char *in, unsigned char *out, unsigned int width, unsigned int height)
{
    unsigned char *y_dst = out;
    unsigned char *u_dst = out + width*height;
    unsigned char *v_dst = out + width*height + width*height/4;
    unsigned long src_len = 2 * width * height; /* YUYV: 2 bytes per pixel */
    unsigned long pos;
    unsigned int row, col;
    unsigned int take_u = 1; /* chroma bytes alternate U,V,U,V within a row */

    /* Luma: every even byte of the packed Y0 U0 Y1 V0 stream is a Y sample. */
    for (pos = 0; pos < src_len; pos += 2)
        *y_dst++ = in[pos];

    /* Chroma: walk the odd bytes of every even row, alternating U and V. */
    for (row = 0; row < height; row += 2)
    {
        unsigned int base = row * width * 2;
        for (col = base + 1; col < base + width * 2; col += 2)
        {
            if (take_u)
            {
                *u_dst++ = in[col];
                take_u = 0;
            }
            else
            {
                *v_dst++ = in[col];
                take_u = 1;
            }
        }
    }
    return 1;
}


/*
 Worker thread: capture camera frames via V4L2.
 Dequeues a filled buffer, converts YUYV -> YUV420P into the shared
 YUV420P_Buffer under the mutex, wakes the encoder thread, and re-queues
 the buffer. Runs forever.
 BUGFIXES: the v4l2_buffer struct was passed to VIDIOC_DQBUF partially
 uninitialized (V4L2 expects unused/reserved fields cleared); poll() and
 ioctl() failures were ignored; the non-void function had no return.
*/
void *pthread_read_video_data(void *arg)
{
	(void)arg;
	/* 1. poll descriptor for frame-ready notification */
	struct pollfd fds;
	fds.fd=video_fd;
	fds.events=POLLIN;

	struct v4l2_buffer video_buffer;
	while(1)
	{
		 /* (1) wait for the camera to fill a buffer */
		 if(poll(&fds,1,-1)<0)
			 continue; /* interrupted (e.g. by a signal): retry */

		 /* (2) dequeue a filled buffer — zero the struct each time */
		 memset(&video_buffer,0,sizeof(video_buffer));
		 video_buffer.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
		 video_buffer.memory=V4L2_MEMORY_MMAP;
		 if(ioctl(video_fd,VIDIOC_DQBUF,&video_buffer)!=0)
			 continue;

         /* (3) convert YUYV -> YUV420P into the shared buffer */
		 pthread_mutex_lock(&mutex);
		 yuyv_to_yuv420p(image_buffer[video_buffer.index],YUV420P_Buffer,VIDEO_WIDTH,VIDEO_HEIGHT);
		 pthread_mutex_unlock(&mutex);
		 pthread_cond_broadcast(&cond); /* wake the encoder thread */

		 /* (4) hand the buffer back to the driver */
		 ioctl(video_fd,VIDIOC_QBUF,&video_buffer);
	}
	return NULL; /* not reached */
}

/*
 Worker thread: capture PCM audio data from the sound card.
 (The original comment wrongly described this as camera capture.)
*/
void *pthread_read_audio_data(void *arg)
{
    (void)arg;
    capture_audio_data(capture_handle,buffer_frames);
    /* BUGFIX: falling off the end of a non-void function is undefined
       behavior if the thread's return value is ever inspected. */
    return NULL;
}

// Usage example:  ./a.out /dev/video0 hw:0
//
// Sets up the audio list, the sync primitives, the camera and the sound
// card, spawns the two capture threads, then loops forever recording one
// STREAM_DURATION-second MP4 per iteration, named after the local time.
int main(int argc,char **argv)
{
	if(argc!=3)
	{
		printf("./app </dev/videoX> <hw:X> \n");
		return 0;
	}
	int err;
	pthread_t thread_id;
	
	// Create the dummy head node of the audio list
	list_head=List_CreateHead(list_head);
	
	/* Mutex + condition variable protecting the shared video frame buffer */
	pthread_mutex_init(&mutex,NULL);
	pthread_cond_init(&cond,NULL);

	/* Mutex protecting the audio linked list */
	pthread_mutex_init(&mutex_audio,NULL);

	/* Initialize the camera (V4L2) */
	err=VideoDeviceInit(argv[1]);
	printf("VideoDeviceInit=%d\n",err);
	if(err!=0)return err;

	/* Camera capture thread (detached).
	   BUGFIX: pthread_create() failure was previously ignored. */
	if(pthread_create(&thread_id,NULL,pthread_read_video_data,NULL)!=0)
	{
		printf("無法創建視頻採集線程\n");
		return -1;
	}
	pthread_detach(thread_id);

	capture_audio_data_init(argv[2]);
	/* Audio capture thread (detached) */
	if(pthread_create(&thread_id,NULL,pthread_read_audio_data,NULL)!=0)
	{
		printf("無法創建音頻採集線程\n");
		return -1;
	}
	pthread_detach(thread_id);
	
	char filename[100];
	time_t t;
	struct tm *tme;
	// Encode loop: one ~10-second MP4 per iteration
	while(1)
	{
		t=time(NULL);
		/* BUGFIX: use localtime() instead of gmtime(t + 8*3600); the old
		   code hard-coded UTC+8 regardless of the machine's time zone. */
		tme=localtime(&t);
		snprintf(filename,sizeof(filename),"%d-%d-%d-%d-%d-%d.mp4",tme->tm_year+1900,tme->tm_mon+1,tme->tm_mday,tme->tm_hour,tme->tm_min,tme->tm_sec);
		printf("視頻名稱:%s\n",filename);
		
		// Record one file (blocks until STREAM_DURATION is reached)
		video_audio_encode(filename);
	}
	return 0;
}

/*
 Create the dummy head node of the audio list (idempotent: returns the
 existing head unchanged when one is already present).
 BUGFIX: the malloc() result was unchecked, and the audio_buffer field of
 the head node was left uninitialized.
*/
struct AUDIO_DATA *List_CreateHead(struct AUDIO_DATA *head)
{
	if(head==NULL)
	{
		head=malloc(sizeof(struct AUDIO_DATA));
		if(head==NULL)
		{
			fprintf(stderr,"List_CreateHead: malloc failed\n");
			exit(1);
		}
		head->audio_buffer=NULL; /* dummy node carries no data */
		head->next=NULL;
	}
	return head;
}

/*
 Append a new node carrying audio_buffer to the tail of the list.
 Ownership of audio_buffer transfers to the list (freed by the consumer).
 Caller must hold mutex_audio.
 BUGFIX: the malloc() result was previously unchecked.
*/
void List_AddNode(struct AUDIO_DATA *head,unsigned char* audio_buffer)
{
	struct AUDIO_DATA *tmp=head;
	struct AUDIO_DATA *new_node;
	/* walk to the tail */
	while(tmp->next)
	{
		tmp=tmp->next;
	}
	/* build the new node */
	new_node=malloc(sizeof(struct AUDIO_DATA));
	if(new_node==NULL)
	{
		fprintf(stderr,"List_AddNode: malloc failed\n");
		exit(1);
	}
	new_node->audio_buffer=audio_buffer;
	new_node->next=NULL;
	/* link it in */
	tmp->next=new_node;
}

/*
 Remove (and free) every node whose audio_buffer equals the given pointer.
 Does NOT free the buffer itself — the caller owns it after removal.
 Caller must hold mutex_audio.
 BUGFIX: the original advanced using the just-freed node — after free(tmp)
 the loop condition dereferenced tmp->next (use-after-free). We now unlink
 via the predecessor and never touch a freed node.
*/
void List_DelNode(struct AUDIO_DATA *head,unsigned char* audio_buffer)
{
	struct AUDIO_DATA *prev=head;
	while(prev->next)
	{
		if(prev->next->audio_buffer==audio_buffer)
		{
			struct AUDIO_DATA *victim=prev->next;
			prev->next=victim->next;
			free(victim);
		}
		else
		{
			prev=prev->next;
		}
	}
}

/*

*/


/*
函數功能:遍歷鏈表,得到節點總數量
*/
int List_GetNodeCnt(struct AUDIO_DATA *head)
{
	int cnt=0;
	struct AUDIO_DATA *tmp=head;
	while(tmp->next)
	{
		tmp=tmp->next;
		cnt++;
	}
	return cnt;
}


/*
 Open and configure the ALSA capture device: interleaved access,
 S16_LE samples, mono, 44100 Hz, 1024-frame reads.
 Exits on any setup failure; returns 0 on success.
 BUGFIX: removed the no-op expression statement `capture_handle;` and the
 unused loop variable of the original.
*/
int capture_audio_data_init( char *audio_dev)
{
	int err;

	buffer_frames = 1024;               /* frames per snd_pcm_readi() call */
	unsigned int rate = AUDIO_RATE_SET; /* requested sample rate (44100) */
	snd_pcm_hw_params_t *hw_params;     /* hardware configuration container */

	/* PCM sample format (see ALSA's pcm.h): 16-bit little-endian */
	format=SND_PCM_FORMAT_S16_LE;

	/* open the capture device */
	if ((err = snd_pcm_open (&capture_handle, audio_dev,SND_PCM_STREAM_CAPTURE,0))<0) 
	{
		printf("無法打開音頻設備: %s (%s)\n",  audio_dev,snd_strerror (err));
		exit(1);
	}
	printf("音頻接口打開成功.\n");

	/* allocate the hardware-parameter structure */
	if((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) 
	{
		printf("無法分配硬件參數結構 (%s)\n",snd_strerror(err));
		exit(1);
	}
	printf("硬件參數結構已分配成功.\n");

	/* initialize it with the device's full configuration space */
	if((err=snd_pcm_hw_params_any(capture_handle,hw_params)) < 0) 
	{
		printf("無法初始化硬件參數結構 (%s)\n", snd_strerror(err));
		exit(1);
	}
	printf("硬件參數結構初始化成功.\n");

	/*
	 Interleaved access mode: with multiple channels, all samples share a
	 single buffer with channel data interleaved; non-interleaved mode
	 would need one buffer per channel.
	*/
	if((err = snd_pcm_hw_params_set_access (capture_handle,hw_params,SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) 
	{
		printf("無法設置訪問類型(%s)\n",snd_strerror(err));
		exit(1);
	}
	printf("訪問類型設置成功.\n");

	/* sample format */
	if ((err=snd_pcm_hw_params_set_format(capture_handle, hw_params,format)) < 0) 
	{
		printf("無法設置格式 (%s)\n",snd_strerror(err));
		exit(1);
	}
	fprintf(stdout, "PCM數據格式設置成功.\n");

	/* sample rate (nearest supported; rate is updated in place) */
	if((err=snd_pcm_hw_params_set_rate_near (capture_handle,hw_params,&rate,0))<0) 
	{
		printf("無法設置採樣率(%s)\n",snd_strerror(err));
		exit(1);
	}
	printf("採樣率設置成功\n");

	/* channel count (1 = mono) */
	if((err = snd_pcm_hw_params_set_channels(capture_handle, hw_params,AUDIO_CHANNEL_SET)) < 0) 
	{
		printf("無法設置聲道數(%s)\n",snd_strerror(err));
		exit(1);
	}
	printf("聲道數設置成功.\n");

	/* commit the configuration to the driver */
	if ((err=snd_pcm_hw_params (capture_handle,hw_params))<0) 
	{
		printf("無法向驅動程序設置參數(%s)\n",snd_strerror(err));
		exit(1);
	}
	printf("參數設置成功.\n");
	/* the parameter structure is no longer needed */
	snd_pcm_hw_params_free(hw_params);

	/* prepare the interface for use */
	if((err=snd_pcm_prepare(capture_handle))<0) 
	{
		printf("無法使用音頻接口 (%s)\n",snd_strerror(err));
		exit(1);
	}
	printf("音頻接口準備好.\n");
	
	return 0;
}

// Scratch buffer for one ALSA read: 1024 frames * 2 bytes * 1 channel
unsigned char audio_read_buff[2048];

/*
 Audio capture loop (runs on its own thread).
 Reads fixed-size PCM chunks from the sound card and appends each chunk
 (heap buffer, ownership transferred) to the shared linked list consumed
 by the encoder thread.
 BUGFIXES: snd_strerror() was applied to a positive short-read count
 (only negative codes are errors); fclose() was called unconditionally on
 pcm_data_file, which is never opened in this program (fclose(NULL) is
 undefined behavior); unused loop variable removed.
*/
int capture_audio_data(snd_pcm_t *capture_handle,int buffer_frames)
{
	int err;
	/* bytes per sample: snd_pcm_format_width() returns bits, so divide by 8 */
	int frame_byte=snd_pcm_format_width(format)/8;

	/* start capturing PCM data */
	printf("開始採集數據...\n");
	char *audio_buffer=NULL;
	while(1) 
	{
		audio_buffer=malloc(buffer_frames*frame_byte*AUDIO_CHANNEL_SET); /* 2048 bytes */
		if(audio_buffer==NULL)
		{
			printf("緩衝區分配錯誤.\n");
			break;
		}
		
		/* read one chunk (1024 frames = 2048 bytes) from the capture device */
		if((err=snd_pcm_readi(capture_handle,audio_read_buff,buffer_frames))!=buffer_frames) 
		{
			if(err<0)
				printf("從音頻接口讀取失敗(%s)\n",snd_strerror(err));
			else
				printf("從音頻接口讀取失敗(short read: %d/%d)\n",err,buffer_frames);
			free(audio_buffer);
			exit(1);
		}
	
		pthread_mutex_lock(&mutex_audio);
		memcpy(audio_buffer,audio_read_buff,buffer_frames*frame_byte*AUDIO_CHANNEL_SET);
		/* the list takes ownership of audio_buffer from here on */
		List_AddNode(list_head,audio_buffer);
		pthread_mutex_unlock(&mutex_audio);
	}
 
	/* Only reached when malloc failed (audio_buffer is NULL here);
	   buffers already handed to the list are freed by the consumer. */
 
	/* close the capture device */
	snd_pcm_close(capture_handle);
 
	if(pcm_data_file)
		fclose(pcm_data_file);

	return 0;
}

五、編譯代碼

wbyq@wbyq:/mnt/hgfs/linux-share-dir/c_code/ffmpeg_video_audio$ cat Makefile 
all:
	gcc ffmpeg_encode_video_audio.c -I /home/wbyq/work_pc/ffmpeg-4.2.2/_install/include -L /home/wbyq/work_pc/ffmpeg-4.2.2/_install/lib -lavcodec -lavfilter -lavutil -lswresample -lavdevice -lavformat -lpostproc -lswscale -L/home/wbyq/work_pc/x264-snapshot-20181217-2245/_install/lib -lx264 -lm -lpthread -lasound

運行方式:

  ./a.out /dev/video0 hw:0

這裏的hw:0 表示當前電腦默認的聲卡設備。

六、運行效果

 

下面公衆號裏有全套的C/C++、單片機、QT基礎教程(歡迎關注):

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章