FFMPEG+QT4.8+VS2010下的多線程BMP序列幀合成視頻

大量參照了雷神(雷霄驊)的博客(原文此處為博客鏈接)

videoMuxer()和flush_encoder()是直接copy的雷神的代碼,但是videoMuxer()的功能我直接融合到了makeH264()中,所以這裏沒有使用它;flush_encoder()中也有相應改動

BMP讀取:

#ifndef READBMPTHREAD_H
#define READBMPTHREAD_H
#include "IncludeFile.h"
#include <QtCore/QThread>
//#include <map>
// NOTE(review): file-scope statics in a header give each translation unit its
// own copy — confirm these are only referenced from a single .cpp.
static int gFirstNum = 0;// starting index for each thread
static int gOkNum = 0; // total number of frames read
// Worker thread: reads a strided subset of a BMP sequence into AVFrames that
// the consumer (MakeVideo::makeH264) encodes and removes.
class ReadBmpThread : public QThread
{
	Q_OBJECT

public:
	// bmpMap: frame number -> file path (shared, read-only).
	// mutex:  guards m_readFrameMap (shared with the consumer).
	// goOn:   shared flag, cleared by run() when this thread finishes reading.
	// threadNum: total number of reader threads (the stride).
	ReadBmpThread(const QMap<int,QString>&bmpMap, QMutex&mutex,bool &goOn,int threadNum=1, QObject *parent=0);
	~ReadBmpThread();
	const QMap<int,QString>& m_bmpMap;   // shared list of BMP paths
	QMap<int,AVFrame*>m_readFrameMap;    // decoded frames keyed by frame number (guarded by m_mutex)
	QMutex& m_mutex;                     // shared with the consumer
	bool& m_goOn;                        // shared "still reading" flag
	int m_allThreadNum;                  // total reader-thread count (stride)
	
protected:
	void run();

private:
	// Read a single 24- or 32-bit BMP into m_readFrameMap under key `num`.
	int readBmp2FrameMap(const char*bmpPath,int num);
	static int s_allThreadNum;           // running counter used to hand out thread indices
	int m_thisThreadNum;                 // this thread's 0-based index
};

#endif // READBMPTHREAD_H

#include "readbmpthread.h"
extern void writeMsg(char* msg, char* mode="a+", char* fileName = ERR_TXT);
// bmpMap: shared frame-number -> path map; mutex/goOn are shared with the
// consumer; threadNum is the total number of reader threads.
// NOTE: s_allThreadNum is not synchronized — construct all ReadBmpThread
// instances from a single thread (as MakeVideo's constructor does).
ReadBmpThread::ReadBmpThread(const QMap<int,QString>&bmpMap, QMutex&mutex, bool &goOn,int threadNum,QObject *parent)
	// BUGFIX: initializer list reordered to match base/declaration order
	// (the base class was listed last, after the members).
	:QThread(parent), m_bmpMap(bmpMap), m_mutex(mutex), m_goOn(goOn), m_allThreadNum(threadNum)
{
	// Hand out a 0-based index; it selects which frames (index, index+N, ...)
	// this thread will read.
	m_thisThreadNum = s_allThreadNum++;
	printf("thread : %d init\n",m_thisThreadNum);
}

// NOTE(review): frames left in m_readFrameMap at destruction are not freed
// here — the consumer is expected to have drained the map. TODO confirm.
ReadBmpThread::~ReadBmpThread()
{
	
}
// Definition of the per-process thread-index counter (see header).
int ReadBmpThread::s_allThreadNum = 0;

void ReadBmpThread::run()
{
	printf("thread : %d run\n",m_thisThreadNum);
	QTime readAllTime;
	readAllTime.start();
	
		
	int n = m_thisThreadNum;

	for(;n<m_bmpMap.size();n+=m_allThreadNum)
	{
		while(m_readFrameMap.size()>30)
		{
			;
		//sleep(5);
		}
		readBmp2FrameMap(m_bmpMap.value(n).toLatin1().data(), n);
	}
	m_goOn = false;
	int aa = readAllTime.elapsed();
	//m_frameMap.insert(-1,NULL);//讀取結束
}
// Read one 24- or 32-bit BMP file into an AVFrame (rows flipped from the
// file's bottom-up order to top-down) and publish it in m_readFrameMap under
// key `num`. Returns 0 in all cases; errors are logged and the frame skipped.
int ReadBmpThread::readBmp2FrameMap(const char*bmpPath,int num)
{
	QTime time_;
	time_.start();

	// Open the image file in binary mode.
	FILE *fp=fopen(bmpPath,"rb");
	if(fp==0) return 0;
	// Skip the BITMAPFILEHEADER, then read the BITMAPINFOHEADER.
	fseek(fp, sizeof(BITMAPFILEHEADER),0);
	BITMAPINFOHEADER head;
	if(fread(&head, sizeof(BITMAPINFOHEADER), 1,fp) != 1)
	{
		// BUGFIX: the original never checked this read.
		fclose(fp);
		return 0;
	}
	// Image width, height and bits-per-pixel from the header.
	int biWidth = head.biWidth;
	int biHeight = head.biHeight;
	int biBitCount = head.biBitCount;
	// Bytes per scan line, padded to a multiple of 4 as BMP requires.
	int lineByte=(biWidth * biBitCount/8+3)/4*4;
	// Only 24- and 32-bit BMPs are supported.
	if(biBitCount != 24 && biBitCount != 32)
	{
		fclose(fp);   // BUGFIX: the file handle used to leak on this path
		char err[100];
		// %.60s bounds the path so the fixed buffer cannot overflow
		sprintf(err,"bmp file: %.60s  is not  24 or 32 bit\n ", bmpPath);
		writeMsg(err);
		return 0;
	}
	// Read the pixel data (stored bottom-up in the file).
	uint8_t* bmpBuffer = (uint8_t* )av_malloc(lineByte* biHeight);
	size_t got = fread(bmpBuffer,1,lineByte * biHeight,fp);
	fclose(fp);
	if(got != (size_t)(lineByte * biHeight))
	{
		// BUGFIX: truncated file used to be handed to the encoder silently.
		av_free(bmpBuffer);
		char err[100];
		sprintf(err,"bmp file: %.60s  is truncated\n ", bmpPath);
		writeMsg(err);
		return 0;
	}

	// Flip the rows into top-down order.
	{
		uint8_t* tempData = (uint8_t*)av_malloc(lineByte*biHeight);
		for(int h=0; h<biHeight; h++)
		{
			memcpy(tempData+(biHeight-1-h)*lineByte, bmpBuffer+(h*lineByte), lineByte);
		}
		memcpy(bmpBuffer,tempData,lineByte*biHeight);
		av_free(tempData);
	}

	// Pick the matching FFmpeg pixel format (24-bit BMP stores BGR order).
	AVPixelFormat pixFmt = (biBitCount == 24) ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB32;

	// Wrap the pixel buffer in an AVFrame. The consumer owns both: it frees
	// data[0] (bmpBuffer) and the frame after encoding.
	AVFrame* rgbFrame = av_frame_alloc();
	avpicture_fill((AVPicture *)rgbFrame, bmpBuffer, pixFmt, biWidth, biHeight);
	rgbFrame->width = biWidth;
	rgbFrame->height = biHeight;
	rgbFrame->linesize[0] = lineByte;   // override: BMP rows are 4-byte aligned
	rgbFrame->format = pixFmt;
	printf("w:%d , h:%d, linesize: %d\n",biWidth, biHeight, lineByte);
	std::cout<<"read time: "<<time_.elapsed()<<"\n";
	// Publish the frame to the shared map under the mutex.
	QMutexLocker locker(&m_mutex);
	m_readFrameMap.insert(num, rgbFrame);
	return 0;
}

視頻合成:

#ifndef MAKEVIDEO_H
#define MAKEVIDEO_H
#include "IncludeFile.h"

#include <QtCore/QObject>
#include "readbmpthread.h"
// Drives the BMP-sequence -> H.264 pipeline: spawns ReadBmpThread workers in
// the constructor and encodes/muxes their frames in makeH264().
class MakeVideo : public QObject
{
	Q_OBJECT

public:
	// imgDir: directory of numbered BMP files; threadNum: reader-thread count.
	MakeVideo(const char* imgDir, int threadNum=1,QObject *parent=0);
	~MakeVideo();
	// Scan dirPath for *.bmp files and fill m_bmpMap (frame number -> path).
	void updateBmpList(const char* dirPath);
	// Encode the sequence to videoName; see the .cpp for parameter semantics.
	bool makeH264(const char*videoName,int fps, int qp, int flat, int threadNum, int width, int height);
	// Drain delayed packets from the encoder into the output muxer.
	int flush_encoder(AVFormatContext *fmt_encode,AVFormatContext*fmt_write,unsigned int stream_index);
	// Remux inVideoName into outVideoName's container (unused — folded into makeH264).
	int videoMuxer( const char* inVideoName, const char* outVideoName);
	QMap<int,QString>m_bmpMap;//BMP filepath, keyed by frame number
	QMutex m_mutex;           // guards the workers' m_readFrameMap maps
	bool goOn;                // cleared by a reader thread once reading finishes
	int m_threadNum;          // number of reader threads
private:
int okNum;                // packets successfully written so far
	 QList<ReadBmpThread*>m_threadList;   // reader threads (never deleted — see the empty destructor)
	 QString temp;	           // debug string of encoded/delayed frame indices
};

#endif // MAKEVIDEO_H
#include "makevideo.h"
extern void writeMsg(char* msg, char* mode="a+", char* fileName = ERR_TXT);
// imgDir: directory containing the numbered BMP sequence; threadNum: number
// of reader threads to spawn. The threads start reading immediately.
MakeVideo::MakeVideo(const char* imgDir,int threadNum,QObject *parent)
	// BUGFIX: initializer list reordered to match base/declaration order
	// (QObject was listed last, and goOn/m_threadNum were swapped).
	:QObject(parent), goOn(true), m_threadNum(threadNum)
{
	updateBmpList(imgDir);
	printf("總BMP數量:%d\n",m_bmpMap.size());
	// Each worker reads every m_threadNum-th frame, offset by its own index.
	for(int n=0;n<m_threadNum;n++)
	{
		ReadBmpThread* rbt =new ReadBmpThread(m_bmpMap, m_mutex,goOn, m_threadNum);
		m_threadList.push_back(rbt);
		rbt->start();
	}
}

// NOTE(review): the ReadBmpThread objects in m_threadList are never wait()ed
// on or deleted, so they leak and may still be running when this object dies.
// A naive wait() here could deadlock against the readers' back-pressure loop
// if the consumer stopped early — needs a stop flag before joining. TODO.
MakeVideo::~MakeVideo()
{

}
// Scan dirPath for *.bmp / *.BMP files named "<number>.bmp" and fill m_bmpMap
// with frame number -> absolute path. Exits the process if the directory is
// missing; leaves the map untouched if the directory is empty.
void MakeVideo::updateBmpList(const char* dirPath)
{
	QDir dir;
	dir.setPath(dirPath);
	if(!dir.exists())
	{
		char err[64];
		// BUGFIX: %.40s bounds the copy so a long path cannot overflow err
		sprintf(err,"dir: %.40s error\n", dirPath);
		writeMsg(err);
		exit(0);
	}
	// The numeric file name is what orders the frames; the QDir::Time flag
	// only affects enumeration order and is irrelevant here.
	QStringList strList = dir.entryList(QStringList()<<"*.BMP"<<"*.bmp",QDir::Files, QDir::Time);
	if(strList.size() == 0)
	{
		printf("dir: %s have no file\n", dirPath);
		return;
	}
	for(int n=0;n<strList.size();n++)
	{
		// "123.bmp" -> key 123; QMap keeps keys sorted, so iteration over the
		// map visits frames in sequence order.
		int bmpNum= strList[n].split(".").at(0).toInt();
		m_bmpMap.insert(bmpNum,dir.filePath(strList[n]));
	}
}
bool MakeVideo::makeH264(const char*videoName,int fps, int qp, int flat,int threadNum, int width, int height)
{
	
	printf("start makeH264\n");

	AVFormatContext* fmt = NULL;
	AVCodec* codec = NULL;
	AVCodecContext* codecCtx= NULL;
	AVFrame* receiveFrame= NULL;
	AVFrame* yuvFrame= NULL;
	AVStream* outStream= NULL;
	printf("start init\n");
	av_register_all();
	printf("av_register_all init\n");
	//if(avformat_alloc_output_context2(&fmt,NULL , NULL, videoName) < 0)
	//{
	//	printf("avformat_alloc_output_context2()   failure\n");
	//	return false;
	//}
	fmt = avformat_alloc_context();
	fmt->oformat = av_guess_format("h264", NULL,NULL);
//fmt->oformat = av_guess_format("mpg2", NULL,NULL);
	if(avio_open2(&fmt->pb, videoName, AVIO_FLAG_READ_WRITE, NULL, NULL) != 0)
	{
		char err[100];
		sprintf(err,"Couldn't open output file: %s  \n",videoName);
		writeMsg(err);
		exit(1);
	}
	printf("codecCtx init\n");
	outStream = avformat_new_stream(fmt, NULL);
	codecCtx = outStream->codec;
	codecCtx->codec_id = fmt->oformat->video_codec;  //比如265  265
	codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;  
	codecCtx->pix_fmt = AV_PIX_FMT_YUV444P/*AV_PIX_FMT_YUV444P*/;  
	codecCtx->width = width;
	codecCtx->height = height;  
	codecCtx->time_base.num = 1;  
	codecCtx->time_base.den = fps;
	codecCtx->gop_size=10;  

	//codecCtx->cqp=0;
	codecCtx->thread_count=threadNum;
	codecCtx->thread_type;
	codecCtx->max_b_frames=0;  

/*使用固定QP或者碼率控制*/
#if 1
	codecCtx->qmin = qp;  
	codecCtx->qmax = qp+5;
	codecCtx->bit_rate = 10*1024*1024*8;//只在QP範圍內生效,爲QP波動範圍中的最小比特率
#else 
	codecCtx->flags|=CODEC_FLAG_QSCALE;
	codecCtx->qmin = 0;  
	codecCtx->qmax =49;
	codecCtx->bit_rate = 5*1024*1024*8;
	codecCtx->rc_min_rate=1*1024*1024*8;
	codecCtx->rc_max_rate=6*1024*1024*5;
#endif
	

	//codecCtx->level = 50;
	printf("codecCtx OK\n");
	//codecCtx->flags = CODEC_FLAG_LOW_DELAY;
	AVDictionary *param = 0;  
	//H.264  
	if(codecCtx->codec_id == AV_CODEC_ID_H264) 
	{  
		av_dict_set(¶m, "preset", "slower", 0);  
	
	switch(flat)
	{
	case 1: av_dict_set(¶m, "tune", "film", 0);  break;
	case 2: av_dict_set(¶m, "tune", "animation", 0);  break;
	case 3: av_dict_set(¶m, "tune", "grain", 0);  break;
	case 4: av_dict_set(¶m, "tune", "stillimage", 0);  break;
	case 0: break;
	}	

	//	av_dict_set(¶m, "tune", "grain", 0);  

	/*film:  電影、真人類型; 
	animation:  動畫; 
	grain:      需要保留大量的grain時用; 
	stillimage:  靜態圖像編碼時使用; 
	psnr:      爲提高psnr做了優化的參數; 
	ssim:      爲提高ssim做了優化的參數; 
	fastdecode: 可以快速解碼的參數; 
	zerolatency:零延遲,用在需要非常低的延遲的情況下,比如電視電話會議的編碼。*/
	}  
	if(codecCtx->codec_id == AV_CODEC_ID_H265){  
		av_dict_set(¶m, "x265-params", "qp=5", 0);  
		av_dict_set(¶m, "preset", "ultrafast", 0);  
		av_dict_set(¶m, "tune", "zero-latency", 0);  
	}  
//	avformat_write_header(fmt, NULL);
	codec = avcodec_find_encoder(codecCtx->codec_id);
	avcodec_open2(codecCtx, codec, ¶m);
	int n=0;//關鍵數據
	
	QTime allTime;//關鍵數據
	int encodecNum=0;
	allTime.start();
	okNum = 0;
/************************************************************************/
/* AVI								                                                                     */
/************************************************************************/
#if 1
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	AVPacket pkt_avi;
	const char* out_filename = videoName;
	int ret, i;
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		return 0;
	}
	ofmt = ofmt_ctx->oformat;
	int videoindex_out=-1;

		//根據輸入流創建輸出流(Create output AVStream according to input AVStream)

			
			AVStream *in_stream = outStream;
			AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
			if (!out_stream) {
				printf( "Failed allocating output stream\n");
				ret = AVERROR_UNKNOWN;
				return 0;
			}
			videoindex_out=out_stream->index;
			//複製AVCodecContext的設置(Copy the settings of AVCodecContext)
			if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
				printf( "Failed to copy context from input to output stream codec context\n");
				return 0;
			}
			out_stream->codec->codec_tag = 0;
			if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
				out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	int audioindex_a=-1,audioindex_out=-1;
	//打開輸出文件(Open output file)
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
			printf( "Could not open output file '%s'", out_filename);
			return 0;
		}
	}
	//寫文件頭(Write file header)
	if (avformat_write_header(ofmt_ctx, NULL) < 0) {
		printf( "Error occurred when opening output file\n");
		return 0;
	}
	int frame_index=0;
	int64_t cur_pts_v=0,cur_pts_a=0;
//
#endif
	while(1)
	{
	//	printf("while\n");
		QTime time__;
		time__.start();
		if(!goOn && n>=m_bmpMap.size() )
			break;
		QTime time;
		time.start();
		
		
		
	int frameMapNum;
	for(frameMapNum=n; frameMapNum>=m_threadNum; frameMapNum-=m_threadNum)
	{
		;
	}
	QMutexLocker lo(&m_mutex);
	receiveFrame = m_threadList[frameMapNum]->m_readFrameMap.value(n);
	lo.unlock();
	static unsigned long  inNullTime = 0;
	if(receiveFrame == NULL)
	{
		if(inNullTime ==0)
			inNullTime = allTime.elapsed();
		//如果等待線程讀取BMP時間超過30S,退出
		else	if( (allTime.elapsed()-inNullTime) >30000)
		{
			char *err = "wait time too long,receive frame data failure\n";
			writeMsg(err);
			exit(1);
		}
		continue;
	}
	else
	{
		inNullTime = 0;
	}
	std::cout<<"time 1 :"<<time.elapsed()<<std::endl;
	if(!receiveFrame)
	{
		printf("receive frame failure\n");
		continue;
	}			
			//
		int numBytes = avpicture_get_size(AV_PIX_FMT_YUV444P, receiveFrame->width, receiveFrame->height);
		uint8_t* buffer = (uint8_t*)av_malloc(numBytes);
		yuvFrame = av_frame_alloc();
		avpicture_fill((AVPicture*)yuvFrame, buffer, AV_PIX_FMT_YUV444P,receiveFrame->width, receiveFrame->height);

		struct SwsContext *img_convert_ctx = NULL;
		img_convert_ctx =
			sws_getCachedContext(img_convert_ctx, receiveFrame->width,
			receiveFrame->height, /*AV_PIX_FMT_RGB32*/(AVPixelFormat)receiveFrame->format,
			receiveFrame->width, receiveFrame->height,
			AV_PIX_FMT_YUV444P, SWS_SPLINE/*SWS_POINT*//*SWS_BICUBIC*/,
			NULL, NULL, NULL);

		if( !img_convert_ctx ) {
			//fprintf(stderr, "Cannot initialize sws conversion context\n");
			writeMsg("Cannot initialize sws conversion context\n");
			exit(1);
		}
		sws_scale(img_convert_ctx, (const uint8_t* const*)receiveFrame->data,
						receiveFrame->linesize, 0, receiveFrame->height, yuvFrame->data,
						yuvFrame->linesize);

		sws_freeContext(img_convert_ctx);
		yuvFrame->pts=n;
		yuvFrame->pkt_dts=n;
		AVPacket pkt;
		av_new_packet(&pkt,receiveFrame->width*receiveFrame->height*3); 
		int isOK;
		time.restart();
		if(avcodec_encode_video2(codecCtx, &pkt, yuvFrame, &isOK)==0)
		{
			std::cout<<"\n  encodec time:  "<<time.elapsed()<<"\n";
			if(isOK)
			{
				temp+="Y"+QString::number(pkt.dts)+"Y";
				time.restart();
				pkt.stream_index = outStream->index;

				//把pts dts的設置放到Frame,這裏接收的是延時的幀,不對應當前幀
				pkt.pts ;
				pkt.dts ;
				
			//	av_write_frame(fmt, &pkt);
				/************************************************************************/
				/* AVI寫                                                                     */
				/************************************************************************/
				pkt.stream_index=videoindex_out;
				printf("Write 1 Packet. size:%5d\tpts:%8d\n",pkt.size,pkt.pts);
				//寫入(Write)
				if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
					printf( "Error muxing packet\n"); 
					break;
				}
				//////////////////////////////////////////////////////////////////////////
				printf("num: %d\t",n+1);
				if(n%10 == 0)
					std::cout<<"\n";
				okNum++;
				std::cout<<".........................write :"<<time.elapsed()<<"\n";
			}
			else
			{
				temp+=QString::number(n)+="\t";
				if(temp.size()%8==0)
					temp+="\n";
			}
		}//encode OK
		av_free(yuvFrame->data[0]);
		av_free(receiveFrame->data[0]);
		av_free(yuvFrame);	
		
		av_free(receiveFrame);
		//加了就報錯,不知爲何
	//	sws_freeContext(img_convert_ctx);
		QMutexLocker locker(&m_mutex);
		  m_threadList[frameMapNum]->m_readFrameMap.remove(n);
		locker.unlock();
		receiveFrame = NULL;
		yuvFrame = NULL;
		av_free_packet(&pkt);
		n++;
		qDebug()<<"-------------------------------------------while  time:"<<time__.elapsed();
	}//while
	flush_encoder(fmt,ofmt_ctx,videoindex_out);
	  av_write_trailer(ofmt_ctx);
	avformat_free_context(fmt);
	avformat_free_context(ofmt_ctx);
	std::cout<<"oknum: "<<okNum<<"  encodecnum:"<<encodecNum;
	std::cout<<"\n allTime: "<<allTime.elapsed();
	Sleep(1000);
}

// Drain buffered (delayed) packets from the encoder attached to
// fmt_encode->streams[stream_index] and mux them into fmt_write.
// Returns 0 when the encoder is fully drained, <0 on encode/mux error.
int MakeVideo::flush_encoder(AVFormatContext *fmt_encode,AVFormatContext*fmt_write,unsigned int stream_index){

	int ret;
	int got_frame;

	// Nothing to flush if the codec never delays packets.
	if (!(fmt_encode->streams[stream_index]->codec->codec->capabilities &
		CODEC_CAP_DELAY))
		return 0;
	while (1) {
		AVPacket enc_pkt;
		enc_pkt.data = NULL;
		enc_pkt.size = 0;
		av_init_packet(&enc_pkt);
		// Passing NULL as the frame asks the encoder for its buffered output.
		// (Removed a pointless av_frame_free(NULL) call and a dead counter.)
		ret = avcodec_encode_video2 (fmt_encode->streams[stream_index]->codec, &enc_pkt,
			NULL, &got_frame);
		if (ret < 0)
			break;
		if (!got_frame){
			ret=0;   // encoder fully drained
			break;
		}
		printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n",enc_pkt.size);
		/* mux encoded frame */
		ret = av_interleaved_write_frame(fmt_write, &enc_pkt);
		av_free_packet(&enc_pkt);
		if (ret < 0)
			break;
		okNum++;
	}

	return ret;
}
// Remux the raw H.264 stream `inVideoName` into the container chosen from
// `outVideoName`'s extension, copying packets without re-encoding.
// Returns 0 on success, -1 on error. NOTE(review): unused here — the logic
// was folded into makeH264(); kept for reference.
int MakeVideo::videoMuxer( const char* inVideoName, const char* outVideoName)
{
	QTime time;
	time.start();
    AVOutputFormat *ofmt = NULL;
    // One AVFormatContext for the input, one for the output.
    AVFormatContext *ifmt_ctx_v = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
     

	 
   const  char *in_filename_v = inVideoName;// input file URL
    
  const   char *out_filename = outVideoName;// output file URL
    av_register_all();
    // Open and probe the input.
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf( "Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf( "Failed to retrieve input stream information");
        goto end;
    }
 
    printf("Input Information=====================\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    printf("======================================\n");
    // Allocate the output context (format guessed from the file extension).
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf( "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    int videoindex_v=-1,videoindex_out=-1;
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        // Create an output AVStream mirroring the first input video stream.
        if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
        videoindex_v=i;
        AVStream *in_stream = ifmt_ctx_v->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            printf( "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        videoindex_out=out_stream->index;
        // Copy the AVCodecContext settings into the output stream.
        if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
            printf( "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        break;
        }
    }
 
    int audioindex_a=-1,audioindex_out=-1;
    // Dump the resulting output format for diagnostics.
    printf("Output Information====================\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    // Open the output file.
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf( "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    // Write the container header.
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf( "Error occurred when opening output file\n");
        goto end;
    }
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
 
    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb"); 
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc =  av_bitstream_filter_init("aac_adtstoasc"); 
#endif
 
    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index=0;
        AVStream *in_stream, *out_stream;
 
 
        // Get the next AVPacket from the (single) video input.
        if(1){
            ifmt_ctx=ifmt_ctx_v;
            stream_index=videoindex_out;
 
            if(av_read_frame(ifmt_ctx, &pkt) >= 0){
                do{
                    if(pkt.stream_index==videoindex_v){
                        cur_pts_v=pkt.pts;
                        break;
                    }
                }while(av_read_frame(ifmt_ctx, &pkt) >= 0);
            }else{
                break;
            }
		}/*else{
		 ifmt_ctx=ifmt_ctx_a;
		 stream_index=audioindex_out;
		 if(av_read_frame(ifmt_ctx, &pkt) >= 0){
		 do{
		 if(pkt.stream_index==audioindex_a){
		 cur_pts_a=pkt.pts;
		 break;
		 }
		 }while(av_read_frame(ifmt_ctx, &pkt) >= 0);
		 }else{
		 break;
		 }

		 }*/
 
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[stream_index];
//FIX
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
        //FIX: No PTS (Example: Raw H.264)
        //Synthesize a simple monotonic PTS from the frame rate.
        if(pkt.pts==AV_NOPTS_VALUE){
            //Write PTS
            AVRational time_base1=in_stream->time_base;
            //Duration between 2 frames (us)
            int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
            //Parameters
            pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
            pkt.dts=pkt.pts;
            pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
            frame_index++;
        }
        /* copy packet */
        // Rescale PTS/DTS/duration from input to output time base.
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index=stream_index;
 
        printf("Write 1 Packet. size:%5d\tpts:%8d\n",pkt.size,pkt.pts);
        // Write the packet into the output container.
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
           printf( "Error muxing packet\n"); 
            break;
        }
        av_free_packet(&pkt);
 
    }
    // Write the container trailer.
    av_write_trailer(ofmt_ctx);
 
#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif
 
end:
    avformat_close_input(&ifmt_ctx_v);
  //  avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf( "Error occurred.\n");
        return -1;
    }
	printf("\nall time:%d\n",time.elapsed());
	
    return 0;
}


發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章