AVFormatContext *pFormatCtx;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame, *pFrameRGB;
AVPacket *packet;
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
常見的函數與用法:
1、 void av_register_all(void);
/**
* Initialize libavformat and register all the muxers, demuxers and
* protocols. If you do not call this function, then you can select
* exactly which formats you want to support.
*
* @see av_register_input_format()
* @see av_register_output_format()
*/
void av_register_input_format(AVInputFormat *format);
void av_register_output_format(AVOutputFormat *format);
av_register_all(); //初始化FFMPEG 調用了這個才能正常使用編碼器和解碼器
2、 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
/**
* Open an input stream and read the header. The codecs are not opened.
* The stream must be closed with avformat_close_input().
*
* @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
* May be a pointer to NULL, in which case an AVFormatContext is allocated by this
* function and written into ps.
* Note that a user-supplied AVFormatContext will be freed on failure.
* @param filename Name of the stream to open.
* @param fmt If non-NULL, this parameter forces a specific input format.
* Otherwise the format is autodetected.
* @param options A dictionary filled with AVFormatContext and demuxer-private options.
* On return this parameter will be destroyed and replaced with a dict containing
* options that were not found. May be NULL.
*
* @return 0 on success, a negative AVERROR on failure.
*
* @note If you want to use custom IO, preallocate the format context and set its pb field.
*/
avformat_open_input(&pFormatCtx, file_path, NULL, NULL) != 0
AVFormatContext *pFormatCtx = avformat_alloc_context();//上下文結構體
3、 AVFormatContext *avformat_alloc_context(void);
/**
* Allocate an AVFormatContext.
* avformat_free_context() can be used to free the context and everything
* allocated by the framework within it.
*/
AVFormatContext *pFormatCtx = avformat_alloc_context();//上下文結構體
4、 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);
/**
* Read packets of a media file to get stream information. This
* is useful for file formats with no headers such as MPEG. This
* function also computes the real framerate in case of MPEG-2 repeat
* frame mode.
* The logical file position is not changed by this function;
* examined packets may be buffered for later processing.
*
* @param ic media file handle
* @param options If non-NULL, an ic.nb_streams long array of pointers to
* dictionaries, where i-th member contains options for
* codec corresponding to i-th stream.
* On return each dictionary will be filled with options that were not found.
* @return >=0 if OK, AVERROR_xxx on error
*
* @note this function isn't guaranteed to open all the codecs, so
* options being non-empty at return is a perfectly normal behavior.
*
* @todo Let the user decide somehow what information is needed so that
* we do not waste time getting stuff the user does not need.
*/
avformat_find_stream_info(pFormatCtx, NULL) < 0 //小於0爲失敗
5、 AVCodec *avcodec_find_decoder(enum AVCodecID id);
/**
* Find a registered decoder with a matching codec ID.
*
* @param id AVCodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
aCodecCtx = pFormatCtx->streams[audioStream]->codec;//解碼器的結構體
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);//得到使用的那個編碼格式
6、 AVFrame *av_frame_alloc(void);
/**
* Allocate an AVFrame and set its fields to default values. The resulting
* struct must be freed using av_frame_free().
*
* @return An AVFrame filled with default values or NULL on failure.
*
* @note this only allocates the AVFrame itself, not the data buffers. Those
* must be allocated through other means, e.g. with av_frame_get_buffer() or
* manually.
*/
AVFrame *pFrame = av_frame_alloc();
7、 int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]);
/**
* Scale the image slice in srcSlice and put the resulting scaled
* slice in the image in dst. A slice is a sequence of consecutive
* rows in an image.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice
* @param srcStride the array containing the strides for each plane of
* the source image
* @param srcSliceY the position in the source image of the slice to
* process, that is the number (counted starting from
* zero) in the image of the first row of the slice
* @param srcSliceH the height of the source slice, that is the number
* of rows in the slice
* @param dst the array containing the pointers to the planes of
* the destination image
* @param dstStride the array containing the strides for each plane of
* the destination image
* @return the height of the output slice
*/
sws_scale(img_convert_ctx,
(uint8_t const * const *) pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
pFrameRGB->linesize);
7、 struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
/**
* Allocate and return an SwsContext. You need it to perform
* scaling/conversion operations using sws_scale().
*
* @param srcW the width of the source image
* @param srcH the height of the source image
* @param srcFormat the source image format
* @param dstW the width of the destination image
* @param dstH the height of the destination image
* @param dstFormat the destination image format
* @param flags specify which algorithm and options to use for rescaling
* @return a pointer to an allocated context, or NULL in case of error
* @note this function is to be removed after a saner alternative is
* written
*/
struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
//#define SWS_BICUBIC 4
//#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32 // AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
8、 int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
/**
* Calculate the size in bytes that a picture of the given width and height
* would occupy if stored in the given picture format.
*
* @param pix_fmt picture pixel format
* @param width picture width
* @param height picture height
* @return the computed picture buffer size or a negative error code
* in case of error
*
* @see av_image_get_buffer_size().
*/
int numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);//
//enum AVPixelFormat
9、 void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
/**
* Allocate a block of size bytes with alignment suitable for all
* memory accesses (including vectors if available on the CPU).
* @param size Size in bytes for the memory block to be allocated.
* @return Pointer to the allocated block, NULL if the block cannot
* be allocated.
* @see av_mallocz()
*/
uint8_t * out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
10、 int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
enum AVPixelFormat pix_fmt, int width, int height);
/**
* Setup the picture fields based on the specified image parameters
* and the provided image data buffer.
*
* The picture fields are filled in by using the image data buffer
* pointed to by ptr.
*
* If ptr is NULL, the function will fill only the picture linesize
* array and return the required size for the image buffer.
*
* To allocate an image buffer and fill the picture data in one call,
* use avpicture_alloc().
*
* @param picture the picture to be filled in
* @param ptr buffer where the image data is stored, or NULL
* @param pix_fmt the pixel format of the image
* @param width the width of the image in pixels
* @param height the height of the image in pixels
* @return the size in bytes required for src, a negative error code
* in case of failure
*
* @see av_image_fill_arrays()
*/
avpicture_fill((AVPicture *) pFrameRGB, out_buffer, PIX_FMT_RGB32,
pCodecCtx->width, pCodecCtx->height);
11、 int av_new_packet(AVPacket *pkt, int size);
/**
* Allocate the payload of a packet and initialize its fields with
* default values.
*
* @param pkt packet
* @param size wanted payload size
* @return 0 if OK, AVERROR_xxx otherwise
*/
packet = (AVPacket *) malloc(sizeof(AVPacket)); //分配一個packet
av_new_packet(packet, y_size); //分配packet的數據
12、 void av_dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
/**
* Print detailed information about the input or output format, such as
* duration, bitrate, streams, container, programs, metadata, side data,
* codec and time base.
*
* @param ic the context to analyze
* @param index index of the stream to dump information about
* @param url the URL to print, such as source or destination file
* @param is_output Select whether the specified context is an input(0) or output(1)
*/
av_dump_format(pFormatCtx, 0, file_path, 0); //輸出視頻信息
13、 int av_read_frame(AVFormatContext *s, AVPacket *pkt);
/**
* Return the next frame of a stream.
* This function returns what is stored in the file, and does not validate
* that what is there are valid frames for the decoder. It will split what is
* stored in the file into frames and return one for each call. It will not
* omit invalid data between valid frames so as to give the decoder the maximum
* information possible for decoding.
*
* If pkt->buf is NULL, then the packet is valid until the next
* av_read_frame() or until avformat_close_input(). Otherwise the packet
* is valid indefinitely. In both cases the packet must be freed with
* av_free_packet when it is no longer needed. For video, the packet contains
* exactly one frame. For audio, it contains an integer number of frames if each
* frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames
* have a variable size (e.g. MPEG audio), then it contains one frame.
*
* pkt->pts, pkt->dts and pkt->duration are always set to correct
* values in AVStream.time_base units (and guessed if the format cannot
* provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
* has B-frames, so it is better to rely on pkt->dts if you do not
* decompress the payload.
*
* @return 0 if OK, < 0 on error or end of file
*/
if (av_read_frame(pFormatCtx, packet) < 0)
{
break; //這裏認爲視頻讀取完了
}
14、 int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
const AVPacket *avpkt);
/**
* Decode the video frame of size avpkt->size from avpkt->data into picture.
* Some decoders may support multiple frames in a single AVPacket, such
* decoders would then just decode the first frame.
*
* @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
* the actual read bytes because some optimized bitstream readers read 32 or 64
* bits at once and could read over the end.
*
* @warning The end of the input buffer buf should be set to 0 to ensure that
* no overreading happens for damaged MPEG streams.
*
* @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
* between input and output, these need to be fed with avpkt->data=NULL,
* avpkt->size=0 at the end to return the remaining frames.
*
* @param avctx the codec context
* @param[out] picture The AVFrame in which the decoded video frame will be stored.
* Use av_frame_alloc() to get an AVFrame. The codec will
* allocate memory for the actual bitmap by calling the
* AVCodecContext.get_buffer2() callback.
* When AVCodecContext.refcounted_frames is set to 1, the frame is
* reference counted and the returned reference belongs to the
* caller. The caller must release the frame using av_frame_unref()
* when the frame is no longer needed. The caller may safely write
* to the frame if av_frame_is_writable() returns 1.
* When AVCodecContext.refcounted_frames is set to 0, the returned
* reference belongs to the decoder and is valid only until the
* next call to this function or until closing or flushing the
* decoder. The caller may not write to it.
*
* @param[in] avpkt The input AVPacket containing the input buffer.
* You can create such packet with av_init_packet() and by then setting
* data and size, some decoders might in addition need other fields like
* flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
* fields possible.
* @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
* @return On error a negative value is returned, otherwise the number of bytes
* used or zero if no frame could be decompressed.
*/
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
////////////////////////////////////////////////////////////////////////
//一般使用簡單的ffmpeg處理視音頻的時候用的步驟和一些結構體介紹
//結構體的介紹
AVFormatContext
iformat:輸入視頻的AVInputFormat
nb_streams :輸入視頻的AVStream 個數
streams :輸入視頻的AVStream []數組(AVStream)
duration :輸入視頻的時長(以微秒爲單位)
bit_rate :輸入視頻的碼率
//一、獲取音。視頻流的在數組中的標號
int videoStream = -1;
int audioStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) { //一般視頻爲第0個 音頻爲第1個//獲取每個流對應的標號
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStream = i;
}
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0)
{
audioStream = i;
}
}
//如果videoStream爲-1 說明沒有找到視頻流
if (videoStream == -1) {
printf("Didn't find a video stream.\n");
return;
}
if (audioStream == -1) {
printf("Didn't find a audio stream.\n");
return;
}
////end 1////
//結構體簡單的介紹
AVStream
id:序號
codec:該流對應的AVCodecContext
time_base:該流的時基
r_frame_rate:該流的幀率
AVCodecContext
codec:編解碼器的AVCodec
width, height:圖像的寬高(只針對視頻)
pix_fmt:像素格式(只針對視頻)
sample_rate:採樣率(只針對音頻)
channels:聲道數(只針對音頻)
sample_fmt:採樣格式(只針對音頻)
AVCodec
name:編解碼器名稱
long_name:編解碼器長名稱
type:編解碼器類型
id:編解碼器ID
一些編解碼的接口函數
//二、獲取對應數據流的解碼器,查找解碼器
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
aCodecCtx = pFormatCtx->streams[audioStream]->codec;//解碼器的結構體
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);//得到使用的那個編碼格式
if (aCodec == NULL) {//判斷是否找到編碼器,若沒有沒有找到解碼器
printf("ACodec not found.\n");
return;
}
////end 2///
//三、打開對應音視頻解碼器,打開解碼器
if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
printf("Could not open audio codec.\n");
return;
}
/////end 3/////
AVPacket
pts:顯示時間戳
dts :解碼時間戳
data :壓縮編碼數據
size :壓縮編碼數據大小
stream_index :所屬的AVStream
AVFrame
data:解碼後的圖像像素數據(音頻採樣數據)。
linesize:對視頻來說是圖像中一行像素的大小;對音頻來說是整個音頻幀的大小。
width, height:圖像的寬高(只針對視頻)。
key_frame:是否爲關鍵幀(只針對視頻) 。
pict_type:幀類型(只針對視頻) 。例如I,P,B。
//
AVFrame *pFrame = av_frame_alloc();
AVFrame *pFrameRGB = av_frame_alloc();
//將解碼後的YUV數據轉換成RGB32
static struct SwsContext *img_convert_ctx;//?????幹
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
int numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);
out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrameRGB, out_buffer, PIX_FMT_RGB32,
pCodecCtx->width, pCodecCtx->height);
int y_size = pCodecCtx->width * pCodecCtx->height;
packet = (AVPacket *) malloc(sizeof(AVPacket)); //分配一個packet
av_new_packet(packet, y_size); //分配packet的數據
av_dump_format(pFormatCtx, 0, file_path, 0); //輸出視頻信息
//輸出
while (1)
{
if (av_read_frame(pFormatCtx, packet) < 0)//每次讀一個packet
{
break; //這裏認爲視頻讀取完了
}
if (packet->stream_index == videoStream) {//所屬的AVStream,就是前面說的下標
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
if (ret < 0) {
printf("decode error.\n");
return;
}
if (got_picture) {
sws_scale(img_convert_ctx,
(uint8_t const * const *) pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
pFrameRGB->linesize);
//把這個RGB數據 用QImage加載
QImage tmpImg((uchar *)out_buffer,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
QImage image = tmpImg.copy(); //把圖像複製一份 傳遞給界面顯示
emit sig_GetOneFrame(image); //發送信號
}
}
av_free_packet(packet);//將這一幀釋放掉
msleep(15); //停一停 不然放的太快了
}
//////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////
//關於SDL2部分的使用
///////////////////////////////////////////////