For how to decode and display video frames on Android with FFmpeg + SDL2.0, see 如何在Android用FFmpeg+SDL2.0解碼顯示圖像; for how to decode audio on Android with FFmpeg + SDL2.0, see 如何在Android用FFmpeg+SDL2.0解碼聲音. That article has one problem, though: the decoded audio is full of noise and is almost impossible to make out, because the audio handling is wrong. This article therefore follows the audio decoding approach analyzed in ffmpeg-sdl音頻播放分析, and the decoded audio now sounds correct.
My development environment: Ubuntu 14.04 64-bit, Eclipse + CDT + ADT + NDK.
Before we start, this article assumes you already know how to build FFmpeg with the NDK, how to port SDL2.0 to the Android platform, and how to decode and display video frames and audio.
By now we roughly know how to decode the video frames and the audio, but only in a simple, standalone way. Our main function is doing far too much: it runs the event loop, reads packets, and decodes the video. So what we are going to do is split those jobs apart: one thread will be responsible for reading packets and putting them on queues, and those packets will then be read and decoded by the corresponding audio and video threads.
Audio thread: we already built a first, rough audio-thread model in 如何在Android用FFmpeg+SDL2.0解碼聲音; in this article we keep refining it.
Video thread: the video thread is a bit more involved, because we have to display the video frames ourselves. We add the actual display code to the main loop. The idea is to decode the video, store each generated frame in a queue, then create a custom refresh event (FF_REFRESH_EVENT) and push it into the event system; when the event loop sees this event, it displays the next frame from the queue, so display happens while decoding continues.
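The custom SDL events and the way the main event loop dispatches them look like this (excerpted, slightly shortened, from the complete listing at the end of this article):

#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
/* inside main(): wait for events and hand them to the matching handler */
for (;;) {
    SDL_WaitEvent(&event);
    switch (event.type) {
    case FF_QUIT_EVENT:
    case SDL_QUIT:
        is->quit = 1;
        SDL_Quit();
        return 0;
    case FF_ALLOC_EVENT:
        alloc_picture(event.user.data1); /* create the texture in the main thread */
        break;
    case FF_REFRESH_EVENT:
        video_refresh_timer(event.user.data1); /* show the next queued frame */
        break;
    }
}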
The project's directory layout is the same as in 如何在Android用FFmpeg+SDL2.0解碼聲音; we simply keep adding functionality on top of it.
1. Create a VideoPicture struct to hold the decoded picture.
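The struct bundles the SDL window, renderer and texture together with the decoded frame data (as defined in the listing below):

typedef struct VideoPicture {
    SDL_Window *screen;
    SDL_Renderer *renderer;
    SDL_Texture *bmp; /* texture that receives the YUV data */
    AVFrame* rawdata; /* decoded frame converted to YUV420P */
    int width, height; /* source width & height */
    int allocated; /* set once the texture has been created */
} VideoPicture;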
2. Add functions to initialize the packet queue and to put packets on and take packets off it.
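The queue is a linked list of AVPacketList nodes protected by an SDL mutex/condition pair; packet_queue_put() appends a packet and signals the condition, and packet_queue_get() can block until a packet becomes available. The declarations, from the listing below:

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head and tail of the list */
    int nb_packets; /* number of queued packets */
    int size; /* total bytes of packet data in the queue */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
void packet_queue_init(PacketQueue *q);
int packet_queue_put(PacketQueue *q, AVPacket *pkt);
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block);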
3. audio_decode_frame(): decodes the audio.
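The key change compared with the earlier, noisy version is that every decoded frame is pushed through libswresample whenever its sample format, channel layout or sample rate differs from what SDL was opened with. The core of that, taken from audio_decode_frame() in the listing below (dec_channel_layout, in and out are set up a few lines earlier in that function):

/* (re)create the resampler when the decoded format no longer matches the expected source */
is->swr_ctx = swr_alloc_set_opts(NULL,
        is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
        dec_channel_layout, is->audio_frame->format, is->audio_frame->sample_rate,
        0, NULL);
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
    fprintf(stderr, "swr_init() failed\n");
}
/* convert into audio_buf2, which audio_callback() later copies into the SDL buffer */
len2 = swr_convert(is->swr_ctx, out,
        sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
        in, is->audio_frame->nb_samples);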
4. audio_callback(): the callback that fills SDL's audio buffer with data.
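The body of the callback keeps copying from our own buffer into the buffer SDL hands us, and calls audio_decode_frame() whenever our buffer runs dry (this is the loop you will find in audio_callback() below):

while (len > 0) {
    if (is->audio_buf_index >= is->audio_buf_size) {
        audio_data_size = audio_decode_frame(is); /* refill our own buffer */
        if (audio_data_size < 0) {
            is->audio_buf_size = 1024; /* decoding failed: play silence */
            memset(is->audio_buf, 0, is->audio_buf_size);
        } else {
            is->audio_buf_size = audio_data_size;
        }
        is->audio_buf_index = 0;
    }
    len1 = is->audio_buf_size - is->audio_buf_index; /* copy at most what SDL still needs */
    if (len1 > len)
        len1 = len;
    memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
    len -= len1;
    stream += len1;
    is->audio_buf_index += len1;
}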
5. Create the video refresh functions:
schedule_refresh(): its main job is to tell the system to push an FF_REFRESH_EVENT after the specified number of milliseconds. When we see that event in the event queue, we in turn call the video refresh function.
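schedule_refresh() is just a thin wrapper around SDL_AddTimer(), and the timer callback pushes the FF_REFRESH_EVENT into the event queue (from the listing below):

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque; /* carry the VideoState pointer along */
    SDL_PushEvent(&event);
    return 0; /* 0 = one-shot timer, do not repeat */
}
static void schedule_refresh(VideoState *is, int delay) {
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}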
6. Add the video display function:
Because our screen can be of any size (we set ours to 640×480, and there are ways to let the user resize it), we need to figure out dynamically how big the display rectangle should be. First we need the movie's aspect ratio, which is just the width divided by the height. Some codecs also have an odd sample aspect ratio, which is the width/height ratio of a single pixel (sample). Since the width and height in our codec context are measured in pixels, the actual aspect ratio equals the width/height ratio multiplied by the sample aspect ratio. Some codecs report a sample aspect ratio of 0, which means each pixel is simply 1×1. Finally we scale the movie to fit our screen as well as possible.
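A minimal sketch of that calculation; screen_w and screen_h are hypothetical variables holding the current window size (the complete listing below computes aspect_ratio exactly like this but then simply renders at the source width and height):

float aspect_ratio;
if (is->video_st->codec->sample_aspect_ratio.num == 0) {
    aspect_ratio = 0; /* unknown sample aspect ratio: treat pixels as 1x1 */
} else {
    aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
            * is->video_st->codec->width / is->video_st->codec->height;
}
if (aspect_ratio <= 0.0) {
    aspect_ratio = (float) is->video_st->codec->width
            / (float) is->video_st->codec->height;
}
/* hypothetical letterboxing: fit the movie into a screen_w x screen_h window */
int h = screen_h;
int w = ((int) (h * aspect_ratio)) & ~1; /* keep the width even */
if (w > screen_w) {
    w = screen_w;
    h = ((int) (w / aspect_ratio)) & ~1;
}
rect.x = (screen_w - w) / 2;
rect.y = (screen_h - h) / 2;
rect.w = w;
rect.h = h;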
7. Allocate the memory used for display output:
For the picture queue we keep two indices, a write index and a read index, and we also track how many pictures are actually in the buffer. To write into the queue, we first wait until the buffer has cleared out enough that there is room to store our VideoPicture. Then we check whether a texture has already been allocated at our write index; if not, we have to allocate one. We also have to reallocate the buffer if the window size has changed.
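The corresponding part of queue_picture() in the listing below: wait for free space, then ask the main thread (via FF_ALLOC_EVENT) to create or recreate the texture when it is missing or the size has changed:

/* wait until we have space for a new picture */
SDL_LockMutex(is->pictq_mutex);
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit) {
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
vp = &is->pictq[is->pictq_windex]; /* windex is set to 0 initially */
if (!vp->bmp || vp->width != is->video_st->codec->width
        || vp->height != is->video_st->codec->height) {
    SDL_Event event;
    vp->allocated = 0;
    event.type = FF_ALLOC_EVENT; /* the texture has to be created in the main thread */
    event.user.data1 = is;
    SDL_PushEvent(&event);
    SDL_LockMutex(is->pictq_mutex);
    while (!vp->allocated && !is->quit) { /* wait for alloc_picture() to finish */
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
}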
8. The decode thread: it opens the decoders, spawns the audio and video threads, and saves the important information into our data structure.
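The heart of decode_thread() is its packet-reading loop: it backs off while the queues already hold enough data, and routes each packet to the audio or the video queue (excerpted from the listing below):

for (;;) {
    if (is->quit)
        break;
    /* throttle reading while the queues hold plenty of data */
    if (is->audioq.size > MAX_AUDIO_SIZE || is->videoq.size > MAX_VIDEO_SIZE) {
        SDL_Delay(10);
        continue;
    }
    if (av_read_frame(is->ic, packet) < 0) {
        if (is->ic->pb->error == 0) {
            SDL_Delay(100); /* no read error, probably end of file */
            continue;
        }
        break;
    }
    if (packet->stream_index == is->videoStream) {
        packet_queue_put(&is->videoq, packet);
    } else if (packet->stream_index == is->audioStream) {
        packet_queue_put(&is->audioq, packet);
    } else {
        av_free_packet(packet); /* not a stream we care about */
    }
}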
9. Write the main function that kicks off the decode thread.
/*
* SDL_Lesson.c
*
* Created on: Aug 12, 2014
* Author: clarck
*/
#include <jni.h>
#include <android/native_window_jni.h>
#include "SDL.h"
#include "SDL_thread.h"
#include "SDL_events.h"
#include "../include/logger.h"
#include "../ffmpeg/include/libavcodec/avcodec.h"
#include "../ffmpeg/include/libavformat/avformat.h"
#include "../ffmpeg/include/libavutil/pixfmt.h"
#include "../ffmpeg/include/libswscale/swscale.h"
#include "../ffmpeg/include/libswresample/swresample.h"
#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_SIZE (5 * 16 * 1024)
#define MAX_VIDEO_SIZE (5 * 256 * 1024)
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
#define VIDEO_PICTURE_QUEUE_SIZE 1
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
typedef struct VideoPicture {
SDL_Window *screen;
SDL_Renderer *renderer;
SDL_Texture *bmp;
AVFrame* rawdata;
int width, height; /*source height & width*/
int allocated;
} VideoPicture;
typedef struct VideoState {
char filename[1024];
AVFormatContext *ic;
int videoStream, audioStream;
AVStream *audio_st;
AVFrame *audio_frame;
PacketQueue audioq;
unsigned int audio_buf_size;
unsigned int audio_buf_index;
AVPacket audio_pkt;
uint8_t *audio_pkt_data;
int audio_pkt_size;
uint8_t *audio_buf;
DECLARE_ALIGNED(16,uint8_t,audio_buf2) [AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
enum AVSampleFormat audio_src_fmt;
enum AVSampleFormat audio_tgt_fmt;
int audio_src_channels;
int audio_tgt_channels;
int64_t audio_src_channel_layout;
int64_t audio_tgt_channel_layout;
int audio_src_freq;
int audio_tgt_freq;
struct SwrContext *swr_ctx;
AVStream *video_st;
PacketQueue videoq;
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
SDL_cond *pictq_cond;
SDL_Thread *parse_tid;
SDL_Thread *audio_tid;
SDL_Thread *video_tid;
AVIOContext *io_ctx;
struct SwsContext *sws_ctx;
int quit;
} VideoState;
VideoState *global_video_state;
void packet_queue_init(PacketQueue *q) {
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
AVPacketList *pkt1;
pkt1 = (AVPacketList *) av_malloc(sizeof(AVPacketList));
if (!pkt1) {
return -1;
}
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt) {
q->first_pkt = pkt1;
} else {
q->last_pkt->next = pkt1;
}
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for (;;) {
if (global_video_state->quit) {
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt) {
q->last_pkt = NULL;
}
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
} else if (!block) {
ret = 0;
break;
} else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
int audio_decode_frame(VideoState *is) {
int len1, len2, decoded_data_size;
AVPacket *pkt = &is->audio_pkt;
int got_frame = 0;
int64_t dec_channel_layout;
int wanted_nb_samples, resampled_data_size;
for (;;) {
while (is->audio_pkt_size > 0) {
if (!is->audio_frame) {
if (!(is->audio_frame = avcodec_alloc_frame())) {
return AVERROR(ENOMEM);
}
} else
avcodec_get_frame_defaults(is->audio_frame);
len1 = avcodec_decode_audio4(is->audio_st->codec, is->audio_frame,
&got_frame, pkt);
if (len1 < 0) {
// error, skip the frame
is->audio_pkt_size = 0;
break;
}
is->audio_pkt_data += len1;
is->audio_pkt_size -= len1;
if (!got_frame)
continue;
/* compute the buffer size the decoded frame needs */
decoded_data_size = av_samples_get_buffer_size(NULL,
is->audio_frame->channels, is->audio_frame->nb_samples,
is->audio_frame->format, 1);
dec_channel_layout =
(is->audio_frame->channel_layout
&& is->audio_frame->channels
== av_get_channel_layout_nb_channels(
is->audio_frame->channel_layout)) ?
is->audio_frame->channel_layout :
av_get_default_channel_layout(
is->audio_frame->channels);
wanted_nb_samples = is->audio_frame->nb_samples;
if (is->audio_frame->format != is->audio_src_fmt
|| dec_channel_layout != is->audio_src_channel_layout
|| is->audio_frame->sample_rate != is->audio_src_freq
|| (wanted_nb_samples != is->audio_frame->nb_samples
&& !is->swr_ctx)) {
if (is->swr_ctx)
swr_free(&is->swr_ctx);
is->swr_ctx = swr_alloc_set_opts(NULL,
is->audio_tgt_channel_layout, is->audio_tgt_fmt,
is->audio_tgt_freq, dec_channel_layout,
is->audio_frame->format, is->audio_frame->sample_rate,
0, NULL);
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
fprintf(stderr, "swr_init() failed\n");
break;
}
is->audio_src_channel_layout = dec_channel_layout;
is->audio_src_channels = is->audio_st->codec->channels;
is->audio_src_freq = is->audio_st->codec->sample_rate;
is->audio_src_fmt = is->audio_st->codec->sample_fmt;
}
/* here the sample count could be adjusted up or down, which is normally used for A/V sync */
if (is->swr_ctx) {
const uint8_t **in =
(const uint8_t **) is->audio_frame->extended_data;
uint8_t *out[] = { is->audio_buf2 };
if (wanted_nb_samples != is->audio_frame->nb_samples) {
if (swr_set_compensation(is->swr_ctx,
(wanted_nb_samples - is->audio_frame->nb_samples)
* is->audio_tgt_freq
/ is->audio_frame->sample_rate,
wanted_nb_samples * is->audio_tgt_freq
/ is->audio_frame->sample_rate) < 0) {
fprintf(stderr, "swr_set_compensation() failed\n");
break;
}
}
len2 = swr_convert(is->swr_ctx, out,
sizeof(is->audio_buf2) / is->audio_tgt_channels
/ av_get_bytes_per_sample(is->audio_tgt_fmt),
in, is->audio_frame->nb_samples);
if (len2 < 0) {
fprintf(stderr, "swr_convert() failed\n");
break;
}
if (len2
== sizeof(is->audio_buf2) / is->audio_tgt_channels
/ av_get_bytes_per_sample(is->audio_tgt_fmt)) {
fprintf(stderr,
"warning: audio buffer is probably too small\n");
swr_init(is->swr_ctx);
}
is->audio_buf = is->audio_buf2;
resampled_data_size = len2 * is->audio_tgt_channels
* av_get_bytes_per_sample(is->audio_tgt_fmt);
} else {
resampled_data_size = decoded_data_size;
is->audio_buf = is->audio_frame->data[0];
}
// We have data, return it and come back for more later
return resampled_data_size;
}
if (pkt->data)
av_free_packet(pkt);
memset(pkt, 0, sizeof(*pkt));
if (is->quit)
return -1;
if (packet_queue_get(&is->audioq, pkt, 1) < 0)
return -1;
is->audio_pkt_data = pkt->data;
is->audio_pkt_size = pkt->size;
}
return 0;
}
void audio_callback(void *userdata, Uint8 *stream, int len) {
VideoState *is = (VideoState *) userdata;
int len1, audio_data_size;
/* len is the size of the SDL buffer passed in by SDL; keep filling it until it is full */
while (len > 0) {
/* audio_buf_index and audio_buf_size describe our own buffer holding the decoded data */
/* that is waiting to be copied into the SDL buffer; when audio_buf_index >= audio_buf_size */
/* our buffer is empty and there is nothing left to copy, so we call audio_decode_frame */
/* to decode more frame data */
if (is->audio_buf_index >= is->audio_buf_size) {
audio_data_size = audio_decode_frame(is);
/* audio_data_size < 0 means we failed to decode any data, so we play silence instead */
if (audio_data_size < 0) {
/* silence */
is->audio_buf_size = 1024;
/* zero the buffer: silence */
memset(is->audio_buf, 0, is->audio_buf_size);
} else {
is->audio_buf_size = audio_data_size;
}
is->audio_buf_index = 0;
}
/* check how much room is left in stream to decide how much to copy now; the rest is copied next time */
len1 = is->audio_buf_size - is->audio_buf_index;
if (len1 > len) {
len1 = len;
}
memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
len -= len1;
stream += len1;
is->audio_buf_index += len1;
}
}
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
SDL_Event event;
event.type = FF_REFRESH_EVENT;
event.user.data1 = opaque;
SDL_PushEvent(&event);
return 0;
}
static void schedule_refresh(VideoState *is, int delay) {
SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
int decode_interrupt_cb(void *opaque) {
return (global_video_state && global_video_state->quit);
}
void video_display(VideoState *is) {
SDL_Rect rect;
VideoPicture *vp;
float aspect_ratio;
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
if (is->video_st->codec->sample_aspect_ratio.num == 0) {
aspect_ratio = 0;
} else {
aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
* is->video_st->codec->width / is->video_st->codec->height;
}
if (aspect_ratio <= 0.0) {
aspect_ratio = (float) is->video_st->codec->width
/ (float) is->video_st->codec->height;
}
rect.x = 0;
rect.y = 0;
rect.w = vp->width;
rect.h = vp->height;
SDL_UpdateYUVTexture(vp->bmp, &rect, vp->rawdata->data[0],
vp->rawdata->linesize[0], vp->rawdata->data[1],
vp->rawdata->linesize[1], vp->rawdata->data[2],
vp->rawdata->linesize[2]);
SDL_RenderClear(vp->renderer);
SDL_RenderCopy(vp->renderer, vp->bmp, &rect, &rect);
SDL_RenderPresent(vp->renderer);
}
}
void video_refresh_timer(void *userdata) {
VideoState *is = (VideoState *) userdata;
//VideoPicture *vp;
if (is->video_st) {
if (is->pictq_size == 0) {
schedule_refresh(is, 1);
} else {
//vp = &is->pictq[is->pictq_rindex];
/* Now, normally here goes a ton of code
about timing, etc. we're just going to
guess at a delay for now. You can
increase and decrease this value and hard code
the timing - but I don't suggest that ;)
We'll learn how to do it for real later.
*/
schedule_refresh(is, 80);
/* show the picture! */
video_display(is);
/* update queue for next picture! */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
is->pictq_rindex = 0;
}
SDL_LockMutex(is->pictq_mutex);
is->pictq_size--;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
} else {
schedule_refresh(is, 100);
}
}
void alloc_picture(void *userdata) {
VideoState *is = (VideoState *) userdata;
VideoPicture *vp;
vp = &is->pictq[is->pictq_windex];
if (vp->bmp) {
// we already have one make another, bigger/smaller
SDL_DestroyTexture(vp->bmp);
}
if(vp->rawdata) {
av_free(vp->rawdata);
}
// Allocate a place to put our YUV image on that screen
vp->screen = SDL_CreateWindow("My Player Window", SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, is->video_st->codec->width,
is->video_st->codec->height,
SDL_WINDOW_FULLSCREEN | SDL_WINDOW_OPENGL);
vp->renderer = SDL_CreateRenderer(vp->screen, -1, 0);
vp->bmp = SDL_CreateTexture(vp->renderer, SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING, is->video_st->codec->width, is->video_st->codec->height);
vp->width = is->video_st->codec->width;
vp->height = is->video_st->codec->height;
AVFrame* pFrameYUV = avcodec_alloc_frame();
if (pFrameYUV == NULL)
return;
int numBytes = avpicture_get_size(PIX_FMT_YUV420P, vp->width,
vp->height);
uint8_t* buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrameYUV, buffer, PIX_FMT_YUV420P,
vp->width, vp->height);
vp->rawdata = pFrameYUV;
SDL_LockMutex(is->pictq_mutex);
vp->allocated = 1;
SDL_CondSignal(is->pictq_cond);
SDL_UnlockMutex(is->pictq_mutex);
}
int queue_picture(VideoState *is, AVFrame *pFrame) {
VideoPicture *vp;
//int dst_pic_fmt
AVPicture pict;
/* wait until we have space for a new pic */
SDL_LockMutex(is->pictq_mutex);
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->quit)
return -1;
// windex is set to 0 initially
vp = &is->pictq[is->pictq_windex];
/* allocate or resize the buffer ! */
if (!vp->bmp || vp->width != is->video_st->codec->width
|| vp->height != is->video_st->codec->height) {
SDL_Event event;
vp->allocated = 0;
/* we have to do it in the main thread */
event.type = FF_ALLOC_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
/* wait until we have a picture allocated */
SDL_LockMutex(is->pictq_mutex);
while (!vp->allocated && !is->quit) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
}
SDL_UnlockMutex(is->pictq_mutex);
if (is->quit) {
return -1;
}
/* We have a place to put our picture on the queue */
if (vp->rawdata) {
// Convert the image into YUV format that SDL uses
sws_scale(is->sws_ctx, (uint8_t const * const *) pFrame->data,
pFrame->linesize, 0, is->video_st->codec->height,
vp->rawdata->data, vp->rawdata->linesize);
/* now we inform our display thread that we have a pic ready */
if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
is->pictq_windex = 0;
}
SDL_LockMutex(is->pictq_mutex);
is->pictq_size++;
SDL_UnlockMutex(is->pictq_mutex);
}
return 0;
}
int video_thread(void *arg) {
VideoState *is = (VideoState *) arg;
AVPacket pkt1, *packet = &pkt1;
int frameFinished;
AVFrame *pFrame;
pFrame = av_frame_alloc();
for (;;) {
if (packet_queue_get(&is->videoq, packet, 1) < 0) {
// means we quit getting packets
break;
}
// Decode video frame
avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
packet);
// Did we get a video frame?
if (frameFinished) {
if (queue_picture(is, pFrame) < 0) {
break;
}
}
av_free_packet(packet);
}
av_free(pFrame);
return 0;
}
int audio_stream_component_open(VideoState *is, int stream_index) {
AVFormatContext *ic = is->ic;
AVCodecContext *codecCtx;
AVCodec *codec;
SDL_AudioSpec wanted_spec, spec;
int64_t wanted_channel_layout = 0;
int wanted_nb_channels;
/* SDL supports 1, 2, 4 or 6 audio channels */
/* the array below is used later to fall back to a supported channel count */
const int next_nb_channels[] = { 0, 0, 1, 6, 2, 6, 4, 6 };
if (stream_index < 0 || stream_index >= ic->nb_streams) {
return -1;
}
codecCtx = ic->streams[stream_index]->codec;
wanted_nb_channels = codecCtx->channels;
if (!wanted_channel_layout
|| wanted_nb_channels
!= av_get_channel_layout_nb_channels(
wanted_channel_layout)) {
wanted_channel_layout = av_get_default_channel_layout(
wanted_nb_channels);
wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
}
wanted_spec.channels = av_get_channel_layout_nb_channels(
wanted_channel_layout);
wanted_spec.freq = codecCtx->sample_rate;
if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
fprintf(stderr, "Invalid sample rate or channel count!\n");
return -1;
}
wanted_spec.format = AUDIO_S16SYS; // signed 16-bit samples in native byte order
wanted_spec.silence = 0; // 0 means silence
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; // our own SDL buffer size
wanted_spec.callback = audio_callback; // the key audio decoding callback
wanted_spec.userdata = is; // opaque user data handed to the callback above
/* open the audio device; the while loop retries with different channel counts (taken */
/* from the next_nb_channels array above) until one succeeds or all of them fail */
while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n",
wanted_spec.channels, SDL_GetError());
wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
if (!wanted_spec.channels) {
fprintf(stderr,
"No more channel combinations to tyu, audio open failed\n");
return -1;
}
wanted_channel_layout = av_get_default_channel_layout(
wanted_spec.channels);
}
/* check the configuration actually in use (stored in spec, filled in by SDL_OpenAudio()) */
if (spec.format != AUDIO_S16SYS) {
fprintf(stderr, "SDL advised audio format %d is not supported!\n",
spec.format);
return -1;
}
if (spec.channels != wanted_spec.channels) {
wanted_channel_layout = av_get_default_channel_layout(spec.channels);
if (!wanted_channel_layout) {
fprintf(stderr, "SDL advised channel count %d is not supported!\n",
spec.channels);
return -1;
}
}
/* save the negotiated parameters into the big VideoState struct */
is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
is->audio_src_freq = is->audio_tgt_freq = spec.freq;
is->audio_src_channel_layout = is->audio_tgt_channel_layout =
wanted_channel_layout;
is->audio_src_channels = is->audio_tgt_channels = spec.channels;
codec = avcodec_find_decoder(codecCtx->codec_id);
if (!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
switch (codecCtx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
is->audioStream = stream_index;
is->audio_st = ic->streams[stream_index];
is->audio_buf_size = 0;
is->audio_buf_index = 0;
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
packet_queue_init(&is->audioq);
SDL_PauseAudio(0); // start audio playback (silence until data arrives)
break;
default:
break;
}
return 0;
}
int video_stream_component_open(VideoState *is, int stream_index) {
AVFormatContext *pFormatCtx = is->ic;
AVCodecContext *codecCtx;
AVCodec *codec;
if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
return -1;
}
// Get a pointer to the codec context for the video stream
codecCtx = pFormatCtx->streams[stream_index]->codec;
codec = avcodec_find_decoder(codecCtx->codec_id);
if (!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
switch (codecCtx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
is->videoStream = stream_index;
is->video_st = pFormatCtx->streams[stream_index];
is->sws_ctx = sws_getContext(is->video_st->codec->width,
is->video_st->codec->height, is->video_st->codec->pix_fmt,
is->video_st->codec->width, is->video_st->codec->height,
AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
packet_queue_init(&is->videoq);
is->video_tid = SDL_CreateThread(video_thread, "video_thread", is);
break;
default:
break;
}
return 0;
}
int decode_thread(void *arg) {
VideoState *is = (VideoState *) arg;
AVFormatContext *pFormatCtx = NULL;
AVPacket pkt1, *packet = &pkt1;
int video_index = -1;
int audio_index = -1;
int i;
is->videoStream = -1;
is->audioStream = -1;
AVIOInterruptCB interupt_cb;
global_video_state = is;
// will interrupt blocking functions if we quit!
interupt_cb.callback = decode_interrupt_cb;
interupt_cb.opaque = is;
if (avio_open2(&is->io_ctx, is->filename, 0, &interupt_cb, NULL)) {
fprintf(stderr, "Cannot open I/O for %s\n", is->filename);
return -1;
}
//Open video file
if (avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
return -1; //Couldn't open file
}
is->ic = pFormatCtx;
//Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
return -1; // Couldn't find stream information
}
//Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, is->filename, 0);
//Find the first video stream
for (i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
&& video_index < 0) {
video_index = i;
}
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
&& audio_index < 0) {
audio_index = i;
}
}
if (audio_index >= 0) {
/* every step needed to set up the SDL audio stream happens inside this function */
audio_stream_component_open(is, audio_index);
}
if (video_index >= 0) {
video_stream_component_open(is, video_index);
}
if (is->videoStream < 0 || is->audioStream < 0) {
fprintf(stderr, "%s: could not open codec\n", is->filename);
goto fail;
}
//main decode loop
/* main packet-reading loop: av_read_frame keeps pulling packets from the file */
for (;;) {
if (is->quit) {
break;
}
//seek stuff goes here
/* audioq.size is the total amount of audio (or video) data carried by all packets in the queue, not the number of packets */
if (is->audioq.size > MAX_AUDIO_SIZE || is->videoq.size > MAX_VIDEO_SIZE) {
SDL_Delay(10);
continue;
}
if (av_read_frame(is->ic, packet) < 0) {
if (is->ic->pb->error == 0) {
SDL_Delay(100); /* no error; wait for user input */
continue;
} else {
break;
}
}
// Is this a packet from the video stream?
if (packet->stream_index == is->videoStream) {
packet_queue_put(&is->videoq, packet);
} else if (packet->stream_index == is->audioStream) {
packet_queue_put(&is->audioq, packet);
} else {
av_free_packet(packet);
}
}
/*all done - wait for it*/
while (!is->quit) {
SDL_Delay(100);
}
fail: if (1) {
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
}
return 0;
}
int main(int argc, char *argv[]) {
char *filename = argv[1];
SDL_Event event;
VideoState *is;
is = av_mallocz(sizeof(VideoState)); // zero-initialize so flags and pointers start clean
// Register all formats and codecs
av_register_all();
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
av_strlcpy(is->filename, filename, sizeof(is->filename));
is->pictq_mutex = SDL_CreateMutex();
is->pictq_cond = SDL_CreateCond();
schedule_refresh(is, 40);
is->parse_tid = SDL_CreateThread(decode_thread, "parse_thread", is);
if (!is->parse_tid) {
av_free(is);
return -1;
}
for (;;) {
SDL_WaitEvent(&event);
switch (event.type) {
case FF_QUIT_EVENT:
case SDL_QUIT:
SDL_CondSignal(is->audioq.cond);
SDL_CondSignal(is->videoq.cond);
is->quit = 1;
SDL_Quit();
return 0;
break;
case FF_ALLOC_EVENT:
alloc_picture(event.user.data1);
break;
case FF_REFRESH_EVENT:
video_refresh_timer(event.user.data1);
break;
}
}
return 0;
}