背景:這段代碼截取自我的 Android demo 項目。項目實現了從相冊中選擇一個視頻,然後每秒生成一張截圖;這段代碼在非 Android 項目中同樣可以使用。
詳情可以參考我的項目:https://gitee.com/niangegelaile/Demo 裏的ffmpeg 模塊;
頭文件:decode_video_to_img.h
#ifndef DEMO_DECODE_VIDEO_TO_IMG_H
#define DEMO_DECODE_VIDEO_TO_IMG_H
/* Extract roughly one frame per second from the video at inputFileName and
 * save each as a 24-bit BMP named "<outputFileName>-<seconds>.bmp".
 * NOTE(review): returns 0 on success; a negative value is returned only when
 * no video stream / codec parameters could be set up - confirm callers
 * check it. */
int createImg(char* inputFileName, char * outputFileName);
#endif //DEMO_DECODE_VIDEO_TO_IMG_H
C文件:decode_video_to_img.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include "./ALog.h"
/* Minimal Windows bitmap (BMP) type aliases so the on-disk headers below
 * can be declared without <windows.h>. */
#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t
/* 2-byte packing: BITMAPFILEHEADER must be exactly 14 bytes on disk; the
 * default alignment would insert 2 bytes of padding after bfType.
 * NOTE(review): this pack(2) is never restored, so it also applies to every
 * struct declared later in this translation unit - confirm intended. */
#pragma pack(2)
/* BMP file header: magic, total file size, and offset to the pixel data. */
typedef struct tagBITMAPFILEHEADER {
WORD bfType;      /* must be 0x4d42, i.e. "BM" */
DWORD bfSize;     /* total file size in bytes */
WORD bfReserved1; /* always 0 */
WORD bfReserved2; /* always 0 */
DWORD bfOffBits;  /* byte offset from file start to the pixel array */
} BITMAPFILEHEADER, *PBITMAPFILEHEADER;
/* BMP info header (the classic 40-byte BITMAPINFOHEADER variant). */
typedef struct tagBITMAPINFOHEADER {
DWORD biSize;        /* size of this header: 40 */
LONG biWidth;        /* image width in pixels */
LONG biHeight;       /* height; NEGATIVE means rows stored top-down */
WORD biPlanes;       /* must be 1 */
WORD biBitCount;     /* bits per pixel (24 in this file) */
DWORD biCompression; /* 0 = BI_RGB (uncompressed) */
DWORD biSizeImage;   /* pixel-data size; may be 0 for BI_RGB */
LONG biXPelsPerMeter;
LONG biYPelsPerMeter;
DWORD biClrUsed;      /* 0 = no palette */
DWORD biClrImportant; /* 0 = all colors important */
} BITMAPINFOHEADER, *PBITMAPINFOHEADER;
/*
 * Write a packed-BGR24 AVFrame to `fileName` as a 24-bit uncompressed BMP.
 *
 * pFrameRGB: frame whose data[0] holds BGR24 pixels, top-down row order
 * width/height: image dimensions in pixels
 * Returns 1 on success, 0 on any I/O failure.
 *
 * Fixes over the previous version:
 *  - BMP scanlines must be padded to a multiple of 4 bytes; the old code
 *    wrote unpadded rows, producing skewed images whenever width % 4 != 0.
 *  - Source rows are advanced by linesize[0] (may exceed width*3) instead
 *    of assuming the buffer is perfectly packed.
 *  - Rows are written with one fwrite each instead of one fwrite per byte,
 *    and every fwrite/fclose result is checked.
 */
int saveAsBitmap(AVFrame *pFrameRGB, int width, int height, char *fileName)
{
    FILE *pFile = NULL;
    BITMAPFILEHEADER bmpheader;
    BITMAPINFOHEADER bmpinfo;
    const int bpp = 24;
    int rowBytes = width * 3;             /* payload bytes per row */
    int stride = (rowBytes + 3) & ~3;     /* rows padded to 4-byte boundary */
    int pad = stride - rowBytes;
    static const uint8_t zeros[3] = {0, 0, 0};

    pFile = fopen(fileName, "wb");
    if (!pFile)
        return 0;

    bmpheader.bfType = 0x4d42; /* 'BM' */
    bmpheader.bfReserved1 = 0;
    bmpheader.bfReserved2 = 0;
    bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    bmpheader.bfSize = bmpheader.bfOffBits + (DWORD)stride * (DWORD)height;

    bmpinfo.biSize = sizeof(BITMAPINFOHEADER);
    bmpinfo.biWidth = width;
    bmpinfo.biHeight = -height; /* negative height => top-down rows */
    bmpinfo.biPlanes = 1;
    bmpinfo.biBitCount = bpp;
    bmpinfo.biCompression = 0; /* BI_RGB, uncompressed */
    bmpinfo.biSizeImage = (DWORD)stride * (DWORD)height;
    bmpinfo.biXPelsPerMeter = 100;
    bmpinfo.biYPelsPerMeter = 100;
    bmpinfo.biClrUsed = 0;
    bmpinfo.biClrImportant = 0;

    if (fwrite(&bmpheader, sizeof(BITMAPFILEHEADER), 1, pFile) != 1 ||
        fwrite(&bmpinfo, sizeof(BITMAPINFOHEADER), 1, pFile) != 1)
        goto fail;

    for (int h = 0; h < height; h++) {
        /* step by linesize[0]: the buffer may carry per-row alignment */
        const uint8_t *row = pFrameRGB->data[0] + (size_t)h * pFrameRGB->linesize[0];
        if (fwrite(row, 1, (size_t)rowBytes, pFile) != (size_t)rowBytes)
            goto fail;
        if (pad && fwrite(zeros, 1, (size_t)pad, pFile) != (size_t)pad)
            goto fail;
    }

    if (fclose(pFile) != 0) /* fclose flushes; a full disk surfaces here */
        return 0;
    return 1;

fail:
    fclose(pFile);
    return 0;
}
/*
 * Convert a decoded frame to BGR24 at one third of its original width and
 * height, then save it to `filename` as a BMP via saveAsBitmap().
 * The 1/3 target size must match the destination size the SwsContext was
 * created with (see createImg()).
 *
 * Fixes over the previous version:
 *  - av_malloc()/av_frame_alloc() failures are now checked;
 *  - the pixel buffer is released with a plain av_free(buffer) instead of
 *    av_freep(&pFrameRGB[0]), which only worked by accident because
 *    data[0] is the first member of AVFrame;
 *  - the frame is released with av_frame_free() instead of av_free().
 */
void saveBMP(struct SwsContext *img_convert_ctx, AVFrame *frame, char *filename)
{
    int w = frame->width;
    int h = frame->height;

    /* bytes needed for a packed BGR24 image at the scaled-down size */
    int numBytes = avpicture_get_size(AV_PIX_FMT_BGR24, w / 3, h / 3);
    ALOGE("numbtyes:%d", numBytes);
    if (numBytes < 0)
        return;

    uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    if (!buffer)
        return;

    AVFrame *pFrameRGB = av_frame_alloc();
    if (!pFrameRGB) {
        av_free(buffer);
        return;
    }

    /* point pFrameRGB->data/linesize into `buffer` (no copy made) */
    avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, w / 3, h / 3);

    /* convert decoder output (e.g. YUV420) => BGR24, scaled to 1/3 */
    sws_scale(img_convert_ctx, frame->data, frame->linesize,
              0, h, pFrameRGB->data, pFrameRGB->linesize);

    saveAsBitmap(pFrameRGB, w / 3, h / 3, filename);

    av_free(buffer);
    av_frame_free(&pFrameRGB);
}
/*
 * Decode one packet; if a complete frame comes out, save it as
 * "<outfilename>-<seconds>.bmp".
 *
 * Returns 1 when a frame was decoded and saved, 0 when the decoder needs
 * more data, or a negative value on decode error.
 *
 * Fix: the timestamp is now computed AFTER avcodec_decode_video2().  The
 * old code read frame->pts before decoding, i.e. it logged/named files
 * with the pts of the frame decoded on the PREVIOUS call (and an
 * unset value on the very first call).
 */
static int decode_write_frame(AVStream *stream, const char *outfilename, AVCodecContext *avctx,
                              struct SwsContext *img_convert_ctx, AVFrame *frame, AVPacket *pkt, int last)
{
    ALOGE("call decode_write_frame ");
    int len, got_frame;
    char buf[1024];

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    ALOGE("got_frame =%d", got_frame);
    ALOGE("len =%d", len);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame (pkt pts %lld)\n",
                (long long)pkt->pts);
        return len;
    }
    if (got_frame) {
        /* presentation time of THIS frame, in seconds.
         * NOTE(review): frame->pts can be AV_NOPTS_VALUE for some inputs;
         * best_effort_timestamp would be more robust - confirm before
         * switching, as it changes the generated file names. */
        double second = frame->pts * av_q2d(stream->time_base);
        /* %lld + cast: pts is int64_t, the old "%ld" was UB on 32-bit ABIs */
        ALOGE("frame->pts=%lld", (long long)frame->pts);
        ALOGE("秒=%lf", second);
        printf("Saving %sframe %lf\n", last ? "last " : "", second);
        fflush(stdout);
        ALOGE("outfilename=%s", outfilename);
        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), "%s-%lf.bmp", outfilename, second);
        ALOGE("buf=%s", buf);
        saveBMP(img_convert_ctx, frame, buf);
        return 1;
    }
    return 0;
}
int createImg(char* inputFileName, char * outputFileName)
{
int ret;
AVFormatContext *fmt_ctx = NULL;
const AVCodec *codec;
AVCodecContext *c= NULL;
AVStream *st = NULL;
int stream_index;
AVFrame *frame;
struct SwsContext *img_convert_ctx;
AVPacket avpkt;
/* register all formats and codecs */
av_register_all();
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, inputFileName, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", inputFileName);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, inputFileName, 0);
av_init_packet(&avpkt);
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(AVMEDIA_TYPE_VIDEO), inputFileName);
return ret;
}
stream_index = ret;
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
codec = avcodec_find_decoder(st->codecpar->codec_id);
if (!codec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
return AVERROR(EINVAL);
}
c = avcodec_alloc_context3(NULL);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(c, st->codecpar)) < 0) {
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
return ret;
}
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
img_convert_ctx = sws_getContext(c->width, c->height,
c->pix_fmt,
c->width/3, c->height/3,
AV_PIX_FMT_BGR24,
SWS_BICUBIC, NULL, NULL, NULL);
if (img_convert_ctx == NULL)
{
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
int from_seconds=0;
ret=av_seek_frame(fmt_ctx, -1,from_seconds*AV_TIME_BASE+fmt_ctx->start_time, AVSEEK_FLAG_BACKWARD);
ALOGE("av_seek_frame ret=%d",ret);
while (ret>=0) {
while((av_read_frame(fmt_ctx, &avpkt) )>= 0){
ALOGE("av_read_frame ret=%d",ret);
if(avpkt.stream_index == stream_index){
int decodeResult=decode_write_frame(st,outputFileName,c,img_convert_ctx, frame, &avpkt, 0);
if ( decodeResult< 0){
av_packet_unref(&avpkt);
goto end;
}else if(decodeResult>0){
av_packet_unref(&avpkt);
break;
} else{
av_packet_unref(&avpkt);
}
}
}
avpkt.data = NULL;
avpkt.size = 0;
from_seconds++;
ALOGE("from_seconds =%d",from_seconds);
if(from_seconds*AV_TIME_BASE+fmt_ctx->start_time>fmt_ctx->start_time+fmt_ctx->duration){
goto end;
}
ret=av_seek_frame(fmt_ctx, -1,from_seconds*AV_TIME_BASE+fmt_ctx->start_time, AVSEEK_FLAG_BACKWARD);
}
end:
avpkt.data = NULL;
avpkt.size = 0;
avformat_close_input(&fmt_ctx);
sws_freeContext(img_convert_ctx);
avcodec_free_context(&c);
av_frame_free(&frame);
return 0;
}
這個是構建一個方法供java調用:
extern "C"{
#include "./decode_video_to_img.h"

/*
 * JNI bridge for com.example.ffmpeg.JniUtil.createImg(String src, String dst).
 * Converts both Java strings to UTF-8, runs the frame extractor, and
 * releases the string buffers.
 *
 * Fix: GetStringUTFChars() returns NULL on out-of-memory; the old code
 * passed the pointers to createImg() unchecked.
 */
JNIEXPORT void JNICALL
Java_com_example_ffmpeg_JniUtil_createImg(JNIEnv *env, jclass clazz, jstring srcFile,
                                          jstring dstFile) {
    const char *src_file = env->GetStringUTFChars(srcFile, nullptr);
    const char *dst_file = env->GetStringUTFChars(dstFile, nullptr);
    if (src_file && dst_file) {
        /* createImg takes non-const char*; it does not modify the strings */
        createImg(const_cast<char *>(src_file), const_cast<char *>(dst_file));
    }
    if (src_file)
        env->ReleaseStringUTFChars(srcFile, src_file);
    if (dst_file)
        env->ReleaseStringUTFChars(dstFile, dst_file);
}
}