FFmpeg Study (3): Building a Simple Video Player with ffmpeg + SDL2.0 (Video + Audio)

The player written in FFmpeg Study (2): Building a Simple Player with ffmpeg + SDL2.0 (Basic Workflow) only supported video playback. This time we add the audio.

Reference:

http://blog.yundiantech.com/?log=blog&id=10

On top of the existing video playback, the audio is played separately through SDL. Working through the code below, the idea behind the audio part is:

1. First, open the SDL audio device. Its callback waits for playback data, which comes from a queue.

2. Read packets from the stream and put them into the queue.

3. The decoding routine waits until the queue has data, decodes it, and copies the result into SDL's audio buffer.

In short: only after packets have been read from the stream is there data to put into the queue; only when the queue has data can decoding happen; only after decoding is there data to copy into the buffer; and only when the buffer has data does the audio device produce sound.
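Before going through the full player source, here is a minimal, self-contained sketch of that same producer/consumer pattern (it is not taken from the player below; the generated sine tone just stands in for the decoder output, and all names and constants are illustrative): the main thread pushes buffers into a queue, and the SDL audio callback blocks on the queue and copies data out, exactly like the audio path described above.

#include <SDL.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SAMPLES 1024                      /* samples per queued chunk */

typedef struct Chunk { Sint16 data[CHUNK_SAMPLES]; struct Chunk *next; } Chunk;
static struct { Chunk *head, *tail; SDL_mutex *mutex; SDL_cond *cond; } q;

static void queue_put(Chunk *c)                 /* producer side */
{
    SDL_LockMutex(q.mutex);
    c->next = NULL;
    if (q.tail) q.tail->next = c; else q.head = c;
    q.tail = c;
    SDL_CondSignal(q.cond);                     /* wake the waiting audio callback */
    SDL_UnlockMutex(q.mutex);
}

static Chunk *queue_get(void)                   /* consumer side: blocks until data arrives */
{
    SDL_LockMutex(q.mutex);
    while (!q.head) SDL_CondWait(q.cond, q.mutex);
    Chunk *c = q.head;
    q.head = c->next;
    if (!q.head) q.tail = NULL;
    SDL_UnlockMutex(q.mutex);
    return c;
}

static void fill_audio(void *userdata, Uint8 *stream, int len)
{
    (void)userdata;                             /* SDL asks for len bytes: pop one chunk and copy it */
    Chunk *c = queue_get();
    memcpy(stream, c->data, len < (int)sizeof(c->data) ? len : (int)sizeof(c->data));
    free(c);
}

#undef main
int main(void)
{
    if (SDL_Init(SDL_INIT_AUDIO) != 0) return 1;
    q.mutex = SDL_CreateMutex();
    q.cond  = SDL_CreateCond();

    SDL_AudioSpec want = {0};
    want.freq = 44100;
    want.format = AUDIO_S16SYS;
    want.channels = 1;
    want.samples = CHUNK_SAMPLES;               /* one callback request == one chunk */
    want.callback = fill_audio;
    if (SDL_OpenAudio(&want, NULL) < 0) return 1;
    SDL_PauseAudio(0);                          /* start playback: the callback now pulls from the queue */

    double phase = 0.0;
    for (int b = 0; b < 200; b++) {             /* producer: queue roughly 4.6 s of a 440 Hz tone */
        Chunk *c = (Chunk *)malloc(sizeof(*c));
        for (int i = 0; i < CHUNK_SAMPLES; i++) {
            c->data[i] = (Sint16)(3000 * sin(phase));
            phase += 2.0 * 3.14159265358979 * 440.0 / 44100.0;
        }
        queue_put(c);
    }
    SDL_Delay(3000);                            /* play about 3 s, then stop; leftover chunks are simply dropped */
    SDL_CloseAudio();
    SDL_Quit();
    return 0;
}

In the real player below, the producer is the av_read_frame loop and the consumer chain is audio_callback → audio_decode_frame → packet_queue_get.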

 

 1. Source Code

/* ************************************************************************
 *       Filename:  audio_demo.c
 *    Description:  
 *        Version:  1.0
 *        Created:  2020-05-25 21:38:10
 *       Revision:  none
 *       Compiler:  gcc
 *         Author:  YOUR NAME (), 
 *        Company:  
 * ************************************************************************/
#ifdef __cplusplus
extern "C"
{
#endif
    //#include <libavcodec/avcodec.h>
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavutil/pixfmt.h>
    #include <libswscale/swscale.h>

    #include <SDL.h>
    #include <SDL_audio.h>
    #include <SDL_types.h>
    #include <SDL_name.h>
    #include <SDL_main.h>
    #include <SDL_config.h>
    #include <SDL2/SDL_thread.h>
#ifdef __cplusplus
};
#endif
#include <stdio.h>

//Refresh
#define SFM_REFRESH_EVENT  (SDL_USEREVENT + 1)
#define SFM_PLAY_END_EVENT (SDL_USEREVENT + 2)
 


//play the provided AAC file first; to be revised later
#define SDL_AUDIO_BUFFER_SIZE 1024
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
#define PLAY_REFRESH_TIME	10				//refresh interval (ms)

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;		//head and tail pointers
    int nb_packets;				//number of packets
    int size;					//total size of the queued packet data
    SDL_mutex *mutex;				//queue mutex
    SDL_cond *cond;				//queue condition variable
} PacketQueue;

//SDL audio playback callback
void audio_callback(void *userdata, Uint8 *stream, int len);
//initialize the packet queue
void packet_queue_init(PacketQueue *q);
//put a packet into the queue
int packet_queue_put(PacketQueue *q, AVPacket *pkt);
//take a packet from the queue
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block);
//decode audio data from the queued packets
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size);
///display a video frame
void Video_Display_To_Window(
								AVPacket 		*packet,		//packet
								AVFrame 		*pFrame, 		//decoded frame
								AVFrame 		*pFrameYUV, 	//YUV frame
								AVCodecContext 	*pCodecCtx,		//codec context
								SDL_Texture    	*bmp,			//texture handle
								SDL_Renderer   	*renderer,		//renderer handle
								struct SwsContext *img_convert_ctx,//scaler context
								SDL_Rect 		rect			//display area
								);

int sfp_refresh_thread(void *opaque);	//entry point of the SDL refresh thread			
int thread_exit=0;

// buffers used during decoding
//AVFrame* audioFrame = avcodec_alloc_frame();
AVFrame *audioFrame = NULL;
PacketQueue *audioq = NULL;

#undef main	//SDL_main.h defines main as SDL_main; undo that so our own main() is the entry point
int main(int argc, char *argv[])
{
	AVFormatContext	*pFormatCtx;				//format context
	AVCodecContext	*pCodecCtx, *videoCodeCtx;				//codec contexts (audio, video)
	int				i, audioindex, videoindex;
	AVCodec			*pCodec, *videoCodec;		//decoders
	AVPacket 		*packet, *VideoPacket;			//packets read from the stream
	AVFrame 		*FrameAudio, *FrameVideo, *FrameYUV;
	uint8_t* out_buffer;
	int numBytes;

	//========SDL==========
	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;
	//=========SDL===end===
	//char *filename = "./video2.mp4";	
	
	//SDL thread
	SDL_Thread *video_tid;
	SDL_Event event; 

	if(1 == argc)
	{
		printf("%s:%d parameter error \n", __func__, __LINE__);
		return -1;
	}

	audioFrame = av_frame_alloc();
	//audioFrame = avcodec_alloc_frame();

//==================standard setup================start====================
	//1. Register all codecs
	av_register_all();
	printf("%s -- %d\n", __func__, __LINE__);

	//2. Allocate the format context
	pFormatCtx = avformat_alloc_context();
	//videoCodeCtx = avformat_alloc_context();
	printf("%s -- %d\n", __func__, __LINE__);

	if(pFormatCtx)
	{
		//3. Open the input file
		if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) //error
		{
			printf("Couldn't open input stream.\n");
			return -1;
		}
	}
	printf("%s -- %d\n", __func__, __LINE__);
	printf("pFormatCtx=%p\n", pFormatCtx);

	//dump stream information
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	//4. Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx,NULL)<0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	printf("%s -- %d\n", __func__, __LINE__);

	//5. Find the indices of the audio and video streams
	audioindex = -1;
	videoindex = -1;
	printf("pFormatCtx->nb_streams:%d\n", pFormatCtx->nb_streams);
	printf("AVMEDIA_TYPE_AUDIO:%d\n", AVMEDIA_TYPE_AUDIO);
	printf("AVMEDIA_TYPE_VIDEO:%d\n", AVMEDIA_TYPE_VIDEO);
	for(i = 0; i < pFormatCtx->nb_streams; i ++)
	{
		printf("cur type :%d\n", pFormatCtx->streams[i]->codec->codec_type);
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			audioindex = i;
		}
		if((pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) && (videoindex < 0))
		{
			videoindex = i;
		}
	}
	if((videoindex < 0) || (audioindex < 0)) //both streams are needed below
	{
		printf("not fine audio or video stream! \n");
		return -1;
	}	

	printf("%s -- %d\n", __func__, __LINE__);
	printf("audioindex:%d, videoindex:%d\n", audioindex, videoindex);
	//6. Get the codec contexts
	pCodecCtx = pFormatCtx->streams[audioindex]->codec;
	videoCodeCtx = pFormatCtx->streams[videoindex]->codec;
	printf("%s -- %d\n", __func__, __LINE__);

	//7. Find the decoders
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	videoCodec = avcodec_find_decoder(videoCodeCtx->codec_id);
	if(pCodec==NULL)
	{
		printf("Codec not found.\n");
		return -1;
	}

 	printf("%s -- %d\n", __func__, __LINE__);
	//8. Open the decoders
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
	{
		printf("Could not open audio codec.\n");
		return -1;
	}

	printf("%s -- %d\n", __func__, __LINE__);
	if(avcodec_open2(videoCodeCtx, videoCodec,NULL)<0)
	{
		printf("Could not open video codec.\n");
		return -1;
	}
//======================standard setup===========end====================

	
//=================SDL=============start===========
	printf("%s -- %d\n", __func__, __LINE__);
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 
	
#if 1
	 ///  Open the SDL audio device - begin
	SDL_LockAudio();
	SDL_AudioSpec spec;
	SDL_AudioSpec wanted_spec;
	wanted_spec.freq = pCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = pCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = pCodecCtx;
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
	{
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	SDL_UnlockAudio();
	SDL_PauseAudio(0); //start playback
#endif
	printf("%s -- %d\n", __func__, __LINE__);

    ///  Open the SDL audio device - end
//=================SDL=============end=============
	//create the packet queue and initialize it
	//PacketQueue audioq = {0};
	audioq = (PacketQueue *)malloc(sizeof(PacketQueue));
	packet_queue_init(audioq);

	packet = av_packet_alloc();
	if(!packet)
	{
		printf("Could not malloc packet.\n");
		return -1;
	}

	// allocate the frame used during decoding
    	//FrameAudio = avcodec_alloc_frame();
	FrameAudio = av_frame_alloc();
	//FrameVideo = av_frame_alloc();
	//FrameYUV   = av_frame_alloc();
	if(!FrameAudio)
	{
		printf("Could not malloc FrameAudio.\n");
		return -1;
	}
	// Debug -- Begin
	printf("比特率 %3d\n", pFormatCtx->bit_rate);
	printf("解碼器名稱 %s\n", pCodecCtx->codec->long_name);
	printf("time_base  %d \n", pCodecCtx->time_base);
	printf("聲道數  %d \n", pCodecCtx->channels);
	printf("sample per second  %d \n", pCodecCtx->sample_rate);
	// Debug -- End

	//=============video====================
	FrameVideo = av_frame_alloc();
	FrameYUV = av_frame_alloc();

	///convert decoded frames to YUV420P
	img_convert_ctx = sws_getContext(videoCodeCtx->width, videoCodeCtx->height, videoCodeCtx->pix_fmt, 
		videoCodeCtx->width, videoCodeCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, videoCodeCtx->width,videoCodeCtx->height);

	out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
	avpicture_fill((AVPicture *) FrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
	    videoCodeCtx->width, videoCodeCtx->height);

	//int y_size = videoCodeCtx->width * videoCodeCtx->height;



	//packet = (AVPacket *) malloc(sizeof(AVPacket)); //allocate a packet
	//av_new_packet(packet, y_size); //allocate the packet's data buffer
	
	screen_w = videoCodeCtx->width;
	screen_h = videoCodeCtx->height;
	//SDL 2.0 Support for multiple windows
	//create the window
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);
 
	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}
	//create a renderer
	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,videoCodeCtx->width,videoCodeCtx->height);  
 
	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;
	//================video===end===========
	printf("%s -- %d\n", __func__, __LINE__);

	video_tid = SDL_CreateThread(sfp_refresh_thread, "play thread", NULL); //create a thread that fires a refresh event at a fixed interval
	while(1)
	{
		//wait
		SDL_WaitEvent(&event);  //wait for an event to arrive, then refresh
		if(event.type==SFM_REFRESH_EVENT)
		{
			if(av_read_frame(pFormatCtx, packet) < 0) //the whole stream has been read
			{
				//Exit Thread
				thread_exit=1;
				break;
			}

			if(packet->stream_index == audioindex)
			{
				//SDL_Delay(10);
				packet_queue_put(audioq, packet);
		    		//the packet goes into the queue here, so we do not free it with av_free_packet
			}else if(packet->stream_index == videoindex) //display video stream
			{
				/*
				Video_Display_To_Window(
									AVPacket 		*packet			//packet
									AVFrame 		*pFrame, 		//decoded frame
									AVFrame 		*pFrameYUV, 	//YUV frame
									AVCodecContext 	*pCodecCtx,		//codec context
									SDL_Texture    	*bmp,			//texture handle
									SDL_Renderer   	*renderer,		//renderer handle
									struct SwsContext *img_convert_ctx,//scaler context									
									SDL_Rect 		rect			//display area
									);
				*/
#if 1
				Video_Display_To_Window( packet,
										FrameVideo, FrameYUV,
										videoCodeCtx, sdlTexture,
										sdlRenderer, img_convert_ctx,
										 sdlRect);
#endif
			}else{
				// Free the packet that was allocated by av_read_frame
		    		av_free_packet(packet);
			}
		}
		
	}

	printf("read finished!\n");
	
	while(1); //wait for SDL to finish playing the audio data; this keeps the program from ever exiting, but if it is removed the audio playback is cut short
	
	SDL_Quit();

	av_free(FrameAudio);
	avcodec_close(pCodecCtx);// Close the codec
	avformat_close_input(&pFormatCtx);// Close the video file

	return 0;
}


void audio_callback(void *userdata, Uint8 *stream, int len)
{
    AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;
    int len1, audio_data_size;

    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;

    /*   len is the size of the SDL buffer passed in by SDL; keep filling it with data until it is full */
    while (len > 0) {
        /*  audio_buf_index and audio_buf_size describe our own buffer holding the decoded data */
        /*  that is waiting to be copied into the SDL buffer. When audio_buf_index >= audio_buf_size */
        /*  our buffer is empty and there is nothing left to copy, so audio_decode_frame is called */
        /*  to decode more frame data. */

        if (audio_buf_index >= audio_buf_size) {
            audio_data_size = audio_decode_frame(aCodecCtx, audio_buf,sizeof(audio_buf));
            /* audio_data_size < 0 means nothing could be decoded; play silence by default */
            if (audio_data_size < 0) {
                /* silence */
                audio_buf_size = 1024;
                /* zero the buffer: silence */
                memset(audio_buf, 0, audio_buf_size);
            } else {
                audio_buf_size = audio_data_size;
            }
            audio_buf_index = 0;
        }
	/*  check how much room is left in stream to decide how much to copy now; the rest is copied next time */
        len1 = audio_buf_size - audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }

        memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}

//initialize the queue
void packet_queue_init(PacketQueue *q) 
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

//put a packet into the queue
int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;
    if (av_dup_packet(pkt) < 0) {
        return -1;
    }
    pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);
    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

//take a packet from the queue
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for (;;) {

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

//decode audio data from the queued packets into audio_buf
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    int len1, data_size;
    

    for(;;)
    {
        if(packet_queue_get(audioq, &pkt, 1) < 0) //no more data, treat playback as finished for now (note: with block=1 this call waits instead of returning < 0)
        {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
        while(audio_pkt_size > 0)
        {
            int got_picture;

            int ret = avcodec_decode_audio4( aCodecCtx, audioFrame, &got_picture, &pkt);
            if( ret < 0 ) {
                printf("Error in decoding audio frame.\n");
                exit(0);
            }

            if( got_picture ) {
                int in_samples = audioFrame->nb_samples;
                short *sample_buffer = (short*)malloc(audioFrame->nb_samples * 2 * 2);
                memset(sample_buffer, 0, audioFrame->nb_samples * 4);

                int i=0;
                float *inputChannel0 = (float*)(audioFrame->extended_data[0]); //assumes planar float samples (AV_SAMPLE_FMT_FLTP), as produced by e.g. the AAC decoder

                // Mono
                if( audioFrame->channels == 1 ) {
                    for( i=0; i<in_samples; i++ ) {
                        float sample = *inputChannel0++;
                        if( sample < -1.0f ) {
                            sample = -1.0f;
                        } else if( sample > 1.0f ) {
                            sample = 1.0f;
                        }

                        sample_buffer[i] = (int16_t)(sample * 32767.0f);
                    }
                } else { // Stereo
                    float* inputChannel1 = (float*)(audioFrame->extended_data[1]);
                    for( i=0; i<in_samples; i++) {
                        sample_buffer[i*2] = (int16_t)((*inputChannel0++) * 32767.0f);
                        sample_buffer[i*2+1] = (int16_t)((*inputChannel1++) * 32767.0f);
                    }
                }
//                fwrite(sample_buffer, 2, in_samples*2, pcmOutFp);
                memcpy(audio_buf,sample_buffer,in_samples*4);
                free(sample_buffer);
            }

            audio_pkt_size -= ret;

            if (audioFrame->nb_samples <= 0)
            {
                continue;
            }

            data_size = audioFrame->nb_samples * 4;

            return data_size;
        }
        if(pkt.data)
            av_free_packet(&pkt);
   }
}

///display a video frame
void Video_Display_To_Window(
								AVPacket 		*packet,		//packet
								AVFrame 		*pFrame, 		//decoded frame
								AVFrame 		*pFrameYUV, 	//YUV frame
								AVCodecContext 	*pCodecCtx,		//codec context
								SDL_Texture    	*bmp,			//texture handle
								SDL_Renderer   	*renderer,		//renderer handle
								struct SwsContext *img_convert_ctx,//scaler context
								SDL_Rect 		rect			//display area
								)
{
	int got_picture;
	SDL_Rect DisplayRect = {0};
	int ret = 0;
	int64_t start_time = 0;
	//AVPacket *streamPack = (AVPacket *)av_malloc(sizeof(AVPacket));
	
	DisplayRect.x = rect.x;
	DisplayRect.y = rect.y;
	DisplayRect.w = rect.w;
	DisplayRect.h = rect.h;

	//printf("display stream!\n");
	start_time = av_gettime();
	//printf("%s start:%d\n", __func__, start_time);
	//fast = av_gettime();
	//printf("fast time: %f ms\n", fast/1000);
	ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
	if(ret < 0)
	{
		printf("Decode Error.\n");
		return;
	}
	if(got_picture)
	{	 
		//printf("update texture to SDL widows!\n");
		sws_scale(
			 img_convert_ctx,
			 (uint8_t const * const *)pFrame->data,
			 pFrame->linesize,
			 0,
			 pCodecCtx->height,
			 pFrameYUV->data,
			 pFrameYUV->linesize
			 );

		////iPitch: number of bytes in one row of YUV data
		//SDL_UpdateTexture( bmp, &DisplayRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );
		SDL_UpdateYUVTexture(bmp, &DisplayRect, 
			pFrameYUV->data[0], pFrameYUV->linesize[0], 
			pFrameYUV->data[1], pFrameYUV->linesize[1], 
			pFrameYUV->data[2], pFrameYUV->linesize[2]);
		SDL_RenderClear( renderer );
		SDL_RenderCopy( renderer, bmp, &DisplayRect, &DisplayRect );
		SDL_RenderPresent( renderer );
		SDL_Delay(10);
	}
	//printf("%s end:%d\n", __func__, av_gettime() - start_time);
	//av_free_packet(packet);
}

//Thread
int sfp_refresh_thread(void *opaque)
{
	SDL_Event event;
   	while (thread_exit==0) 
	{
   		event.type = SFM_REFRESH_EVENT;
   		SDL_PushEvent(&event);
		//Wait x ms
   		SDL_Delay(PLAY_REFRESH_TIME);
   	}
   	return 0;
}

2. Makefile

CC		= gcc 
CFLAGS	= -g -O2 -D_REENTRANT -I/usr/local/include/SDL2 -I/usr/local/include/ffmpeg  -DHAVE_OPENGL -g
LIBS	= -lSDL2_test -Wl,-rpath,/usr/local/lib -Wl,--enable-new-dtags -L/usr/local/lib -lSDL2  -L/monchickey/ffmpeg/lib -lavformat -lavcodec -lavutil -lswscale
srcdir	= .

video_play : $(srcdir)/audio_video.c
	$(CC) -o $@ $^ $(CFLAGS) $(LIBS)

.PHONY : clean
clean:
	rm -rf video_play
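To build and run: save the source next to this Makefile and run make. Note that the Makefile compiles audio_video.c, while the header comment in the listing above says audio_demo.c, so the file name has to match what the Makefile expects. The resulting video_play binary takes the file to play as its single argument, for example ./video_play video2.mp4 (the same name that appears in the commented-out filename in the source).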

 

3. Known Issues

1. Compared with the original file, the audio and video are not in sync: the audio lags behind the video (see the sketch after this list).

2. Do not add printf logging to the SDL image-refresh code, otherwise the picture stutters badly.
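A likely contributor to issue 1 is that the video pacing is hard-coded: the refresh thread fires every PLAY_REFRESH_TIME (10 ms) and Video_Display_To_Window adds another SDL_Delay(10), neither of which is tied to the stream's actual frame rate or to the audio clock. The sketch below is only a first step (an assumption-laden sketch, not full audio-clock synchronization): it derives the refresh interval from the video stream's reported average frame rate. The result would replace PLAY_REFRESH_TIME in sfp_refresh_thread, and the extra SDL_Delay(10) in Video_Display_To_Window would be dropped.

#include <libavformat/avformat.h>

/* Hypothetical helper (not part of the player above): choose a per-frame
 * refresh interval in milliseconds from the video stream's average frame rate,
 * falling back to 40 ms (25 fps) when the rate is not reported. */
static int refresh_interval_ms(const AVFormatContext *fmt, int video_index)
{
    AVRational fr = fmt->streams[video_index]->avg_frame_rate;
    if (fr.num > 0 && fr.den > 0)
        return (int)(1000.0 / av_q2d(fr));   /* e.g. ~33 ms for 29.97 fps */
    return 40;
}

Real synchronization would additionally compare each video frame's PTS with the audio playback position, which is beyond the scope of this post.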

 
