【FFMPEG】Overlaying and Stitching YUV420P Images


Much of the code for this floating around online is buggy! Below is a complete, working example.

Libraries used:
  • ffmpeg, to decode the H.264 input. You can also stitch raw YUV images directly; as long as you keep YUV420P's storage layout and chroma subsampling straight (see the sketch after this list), nothing will go wrong.
  • sdl2, for real-time display
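To make that first bullet concrete before the full program: for a W x H YUV420P frame, the planes are stored back to back, Y first (W*H bytes), then U, then V (W/2 x H/2 bytes each). Here is a minimal, self-contained sketch of that layout; the frame size and file name are assumptions for illustration only.

#include <cstdio>
#include <cstdint>
#include <vector>

int main() {
	const int w = 640, h = 480;                  // assumed frame size
	const size_t ySize = (size_t)w * h;          // one byte per Y sample
	const size_t cSize = ySize / 4;              // U and V are each 1/4 of Y
	std::vector<uint8_t> frame(ySize + 2 * cSize);

	FILE* f = fopen("input_640x480.yuv", "rb");  // hypothetical raw YUV420P file
	if (!f) return -1;
	if (fread(frame.data(), 1, frame.size(), f) == frame.size()) {
		uint8_t* y = frame.data();               // plane order: Y, then U, then V
		uint8_t* u = y + ySize;
		uint8_t* v = u + cSize;
		printf("first samples: Y=%d U=%d V=%d\n", y[0], u[0], v[0]);
	}
	fclose(f);
	return 0;
}

Now the full program: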
#include <stdio.h>

#define __STDC_CONSTANT_MACROS

#ifdef _WIN32
// Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
#include "SDL.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include "SDL.h"

#ifdef __cplusplus
};
#endif
#endif


#define ENABLE_SDL	1
#define ENABLE_YUV	0

#define REFRESH_EVENT  (SDL_USEREVENT + 1)
#define BREAK_EVENT  (SDL_USEREVENT + 2)

#define FRAMEWIDTH 640
#define FRAMEHEIGHT 480

#define BGWIDTH 800
#define BGHEIGHT 600




static int thread_exit = 0;


// Timer thread: posts a REFRESH_EVENT roughly every 40 ms (~25 fps) until
// thread_exit is set, then posts a final BREAK_EVENT.
int refresh_video1(void* opaque) {
	thread_exit = 0;
	while (!thread_exit) {
		SDL_Event event;
		event.type = REFRESH_EVENT;
		SDL_PushEvent(&event);
		SDL_Delay(40);
	}
	thread_exit = 0;
	SDL_Event event;
	event.type = BREAK_EVENT;
	SDL_PushEvent(&event);

	return 0;
}



int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	int             i, videoindex;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVFrame* pFrame, * pFrameYUV, * pDstFrame;
	uint8_t* out_buffer;
	AVPacket* packet;
	int ret, got_picture;
	struct SwsContext* img_convert_ctx = nullptr;
	// Input file path
	//char filepath[] = "test_640x480.mp4";
	char filepath[] = "C:/Users/Li/Desktop/test.h264";

	int frame_cnt;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	if (videoindex == -1) {
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
	if (pCodec == NULL) {
		printf("Codec not found.\n");
		return -1;
	}

	pCodecCtx = avcodec_alloc_context3(pCodec);
	// Copy width/height/pix_fmt/extradata from the demuxer's codec context,
	// so the decoder is fully configured before avcodec_open2() instead of
	// patching these fields on every packet inside the decode loop.
	avcodec_copy_context(pCodecCtx, pFormatCtx->streams[videoindex]->codec);

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		printf("Could not open codec.\n");
		return -1;
	}
	/*
	 * Video information from pFormatCtx could be printed here
	 * with fprintf().
	 */
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = new uint8_t[av_image_get_buffer_size(AV_PIX_FMT_YUV420P, FRAMEWIDTH, FRAMEHEIGHT, 1)];
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
		AV_PIX_FMT_YUV420P, FRAMEWIDTH, FRAMEHEIGHT, 1);
	pDstFrame = av_frame_alloc();
	int nDstSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, BGWIDTH, BGHEIGHT, 1);
	uint8_t* dstbuf = new uint8_t[nDstSize];
	av_image_fill_arrays(pDstFrame->data, pDstFrame->linesize, dstbuf,
		AV_PIX_FMT_YUV420P, BGWIDTH, BGHEIGHT, 1);

	pDstFrame->width = BGWIDTH;
	pDstFrame->height = BGHEIGHT;
	pDstFrame->format = AV_PIX_FMT_YUV420P;

	// Fill the pre-allocated background frame with black:
	// Y = 0x00, and U = V = 0x80 (the neutral chroma value).
	memset(pDstFrame->data[0], 0x00, BGWIDTH * BGHEIGHT);
	memset(pDstFrame->data[1], 0x80, BGWIDTH * BGHEIGHT / 4);
	memset(pDstFrame->data[2], 0x80, BGWIDTH * BGHEIGHT / 4);

	packet = av_packet_alloc();

	// Optional: dump the input information.
	//av_dump_format(pFormatCtx, 0, filepath, 0);


#if ENABLE_SDL

	ret = SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
	if (ret) {
		printf("Could not initialize SDL - %s\n", SDL_GetError());
		return -10;
	}

	SDL_Window* screen = SDL_CreateWindow("Simple Video Play SDL2", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		BGWIDTH, BGHEIGHT, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return -11;
	}

	SDL_Renderer* renderer = SDL_CreateRenderer(screen, -1, 0);
	if (!renderer) {
		printf("SDL : could not create renderer - exiting:&s\n", SDL_GetError());
		return -12;
	}

	SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
		BGWIDTH, BGHEIGHT);

	SDL_Rect rect;

	rect.x = 0;
	rect.y = 0;
	rect.h = BGHEIGHT;
	rect.w = BGWIDTH;

	SDL_Thread* refreshThread = SDL_CreateThread(refresh_video1, "refresh", NULL);
	SDL_Event event;
#endif


	int count = 0;
	frame_cnt = 0;
#if ENABLE_YUV
	FILE* fp_yuv420 = fopen("test_yuv420p_800x600.yuv", "wb+");
#endif
	while (av_read_frame(pFormatCtx, packet) >= 0) {
#if ENABLE_SDL
		SDL_WaitEvent(&event);
		if (event.type == REFRESH_EVENT) {
#endif
			if (packet->stream_index == videoindex) {
				/*
				 * To also dump the raw H.264 bitstream, write packet->data
				 * here with fwrite().
				 */

				ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
				if (ret < 0) {
					printf("Decode Error.\n");
					return -1;
				}
				if (got_picture) {
					if (!img_convert_ctx) {
						img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
							FRAMEWIDTH, FRAMEHEIGHT, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
					}
					sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
						pFrameYUV->data, pFrameYUV->linesize);
					printf("Decoded frame index: %d\n", frame_cnt);
					// Overlay section -- the key part!
					// (A side-by-side stitching variant is sketched after
					// this listing.)
					if (pFrameYUV) {
						// Overlay the 640x480 frame onto the 800x600 canvas at a
						// luma offset of (80, 80); the chroma planes use half the
						// offset and half the copy width in both dimensions.
						for (int i = 0; i < FRAMEHEIGHT; ++i) {
							//Y
							memcpy(pDstFrame->data[0] + (i + 80) * BGWIDTH + 80,
								pFrameYUV->data[0] + i * FRAMEWIDTH, FRAMEWIDTH);
						}

						for (int i = 0; i < FRAMEHEIGHT / 2; ++i) {
							//U
							memcpy(pDstFrame->data[1] + (i + 40) * (BGWIDTH / 2) + 40,
								pFrameYUV->data[1] + i * (FRAMEWIDTH / 2), FRAMEWIDTH / 2);
							//V
							memcpy(pDstFrame->data[2] + (i + 40) * (BGWIDTH / 2) + 40,
								pFrameYUV->data[2] + i * (FRAMEWIDTH / 2), FRAMEWIDTH / 2);
						}
					}

#if ENABLE_SDL
					SDL_UpdateYUVTexture(texture, &rect,
						pDstFrame->data[0], pDstFrame->linesize[0],
						pDstFrame->data[1], pDstFrame->linesize[1],
						pDstFrame->data[2], pDstFrame->linesize[2]);

					SDL_RenderClear(renderer);
					SDL_RenderCopy(renderer, texture, NULL, &rect);
					SDL_RenderPresent(renderer);

#endif

#if ENABLE_YUV
					fwrite(pDstFrame->data[0], 1, BGWIDTH * BGHEIGHT, fp_yuv420);
					fwrite(pDstFrame->data[1], 1, BGWIDTH * BGHEIGHT / 4, fp_yuv420);
					fwrite(pDstFrame->data[2], 1, BGWIDTH * BGHEIGHT / 4, fp_yuv420);
#endif

					frame_cnt++;
				}
			}
#if ENABLE_SDL
		} else if (event.type == SDL_QUIT || event.type == BREAK_EVENT) {
			break;
		}
#endif
		count++;
		av_frame_unref(pFrame);
		av_packet_unref(packet);
	}

#if ENABLE_SDL
	// Stop the refresh thread and tear down SDL.
	thread_exit = 1;
	SDL_WaitThread(refreshThread, NULL);
	SDL_DestroyTexture(texture);
	SDL_DestroyRenderer(renderer);
	SDL_DestroyWindow(screen);
	SDL_Quit();
#endif

#if ENABLE_YUV
	fclose(fp_yuv420);
#endif
	sws_freeContext(img_convert_ctx);

	av_packet_free(&packet);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	av_frame_free(&pDstFrame);
	avcodec_close(pCodecCtx);
	avcodec_free_context(&pCodecCtx);
	avformat_close_input(&pFormatCtx);

	delete[] out_buffer;
	delete[] dstbuf;

	return 0;
}
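To build on Linux, something along these lines should work, assuming the FFmpeg and SDL2 development packages are installed and provide pkg-config files (the output name is arbitrary):

g++ -o yuv_overlay main.cpp $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil sdl2)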

The overlay, explained

						// YUV420P is planar: all the Y samples come first
						// (y1y2y3...), then all the U (u1u2u3...), then all the
						// V (v1v2v3...). Chroma is subsampled by half both
						// horizontally and vertically, so every 2x2 block of Y
						// samples shares one U and one V, and each chroma plane
						// holds 1/4 as much data as the Y plane.
						for (int i = 0; i < FRAMEHEIGHT; ++i) {
							//Y
							memcpy(pDstFrame->data[0] + (i + 80) * BGWIDTH + 80,
								pFrameYUV->data[0] + i * FRAMEWIDTH, FRAMEWIDTH);
						}

						// Two separate loops are written out to make the YUV420P
						// layout explicit: the U/V planes must be halved in BOTH
						// the horizontal and the vertical dimension. Quartering a
						// single dimension instead will corrupt the image.
						for (int i = 0; i < FRAMEHEIGHT / 2; ++i) {
							//U
							memcpy(pDstFrame->data[1] + (i + 40) * (BGWIDTH / 2) + 40,
								pFrameYUV->data[1] + i * (FRAMEWIDTH / 2), FRAMEWIDTH / 2);
							//V
							memcpy(pDstFrame->data[2] + (i + 40) * (BGWIDTH / 2) + 40,
								pFrameYUV->data[2] + i * (FRAMEWIDTH / 2), FRAMEWIDTH / 2);
						}
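The title also promises stitching (拼接). The same layout rules give a side-by-side stitch: copy each source row to a luma x-offset of W in the destination, and W/2 for the chroma planes. Here is a minimal sketch, assuming two tightly packed 640x480 YUV420P buffers (linesize equal to width) and a 1280x480 canvas; the function and buffer names are illustrative, not part of the program above.

#include <cstdint>
#include <cstring>

// Stitch two tightly packed WxH YUV420P frames side by side into a
// (2W)xH canvas (all buffers packed, i.e. linesize == width).
void stitch_side_by_side(const uint8_t* frameA, const uint8_t* frameB,
	uint8_t* canvas, int W, int H)
{
	const uint8_t* srcs[2] = { frameA, frameB };
	const int dstW = 2 * W;
	uint8_t* dstY = canvas;
	uint8_t* dstU = dstY + (size_t)dstW * H;      // Y plane: dstW * H bytes
	uint8_t* dstV = dstU + (size_t)dstW * H / 4;  // U plane: 1/4 of the Y plane
	for (int n = 0; n < 2; ++n) {
		const uint8_t* srcY = srcs[n];
		const uint8_t* srcU = srcY + (size_t)W * H;
		const uint8_t* srcV = srcU + (size_t)W * H / 4;
		const int xOff = n * W;                   // luma x-offset of this tile
		for (int i = 0; i < H; ++i)               // Y: full-resolution rows
			memcpy(dstY + (size_t)i * dstW + xOff, srcY + (size_t)i * W, W);
		for (int i = 0; i < H / 2; ++i) {         // U/V: half width, half height
			memcpy(dstU + (size_t)i * (dstW / 2) + xOff / 2,
				srcU + (size_t)i * (W / 2), W / 2);
			memcpy(dstV + (size_t)i * (dstW / 2) + xOff / 2,
				srcV + (size_t)i * (W / 2), W / 2);
		}
	}
}

A common mistake in code found online is to copy FRAMEHEIGHT / 4 chroma rows at the full FRAMEWIDTH width: the total byte count matches, but every chroma row lands misaligned, which is exactly the corruption the half-width, half-height loops avoid.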