本文按照以下步骤进行开发:
1.新建AudioPlayer类
package com.test.ffmpeg;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.util.Log;
/**
* Created by ygdx_lk on 17/11/2.
*/
/**
 * Plays PCM audio decoded by the native (FFmpeg) layer through an {@link AudioTrack}.
 * <p>
 * The native {@link #sound} method drives the lifecycle: it calls {@link #createAudio}
 * once with the stream parameters, then {@link #playTrack} repeatedly with decoded
 * PCM chunks.
 */
public class AudioPlayer {
    private static final String TAG = "AudioPlayer";

    private AudioTrack audioTrack;

    /**
     * Native entry point: decodes {@code input} with FFmpeg and streams PCM back into
     * this object via {@link #createAudio} / {@link #playTrack}.
     *
     * @param input  path of the source audio file (e.g. an MP3)
     * @param output path reserved for a PCM dump (currently unused by the native side)
     */
    public native void sound(String input, String output);

    /**
     * Called from native code to (re)configure the {@link AudioTrack}.
     * ENCODING_PCM_16BIT is used because it is the only encoding the framework
     * guarantees on all devices.
     *
     * @param sampleRateInHz sample rate of the decoded PCM, in Hz
     * @param nb_channals    channel count: 2 = stereo, anything else falls back to mono
     */
    public synchronized void createAudio(int sampleRateInHz, int nb_channals) {
        final int channelConfig;
        if (nb_channals == 2) {
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO; // 立体声
        } else {
            channelConfig = AudioFormat.CHANNEL_OUT_MONO;   // 单声道 (also the fallback)
        }
        int bufferSize = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig,
                AudioFormat.ENCODING_PCM_16BIT);
        // getMinBufferSize returns a negative error code for unsupported parameters;
        // passing that to the AudioTrack constructor would throw.
        if (bufferSize <= 0) {
            Log.e(TAG, "createAudio: unsupported parameters, sampleRateInHz=" + sampleRateInHz);
            return;
        }
        // Release any previous track so repeated calls do not leak the audio session.
        if (audioTrack != null) {
            audioTrack.release();
        }
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channelConfig,
                AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);
        audioTrack.play();
    }

    /**
     * Called from native code with one chunk of decoded 16-bit PCM.
     *
     * @param buffer PCM bytes
     * @param length number of valid bytes in {@code buffer}
     */
    public synchronized void playTrack(byte[] buffer, int length) {
        if (audioTrack != null && audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
            Log.i(TAG, "playTrack: " + length);
            audioTrack.write(buffer, 0, length);
        }
    }
}
2.native-lib.h中添加如下方法
JNIEXPORT void JNICALL Java_com_test_ffmpeg_AudioPlayer_sound(JNIEnv *env, jobject instance, jstring input_, jstring output_);
3.native-lib.cpp中实现
/*
 * Decode the audio stream of `input_` with FFmpeg, resample it to interleaved
 * 16-bit stereo PCM, and push the chunks back into the Java AudioPlayer via
 * createAudio(int,int) and playTrack(byte[],int).
 *
 * `output_` is reserved for a raw PCM dump (the fopen/fwrite calls are kept
 * commented out below) and is otherwise unused.
 */
void JNICALL Java_com_test_ffmpeg_AudioPlayer_sound(JNIEnv *env, jobject instance, jstring input_, jstring output_) {
    const char *input = env->GetStringUTFChars(input_, 0);
    const char *output = env->GetStringUTFChars(output_, 0);

    // Register muxers/demuxers/codecs (required by the pre-4.0 FFmpeg API).
    av_register_all();

    // AVFormatContext describes the container: streams, duration, metadata, ...
    AVFormatContext *avFormatContext = avformat_alloc_context();
    if (avformat_open_input(&avFormatContext, input, NULL, NULL) < 0) {
        LOGE("%s", "打开文件失败");
        // FIX: every early return used to leak both JNI strings (and on later
        // paths the format context as well). avformat_open_input frees the
        // user-supplied context itself on failure, so only the strings remain.
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }
    if (avformat_find_stream_info(avFormatContext, NULL) < 0) {
        LOGE("%s", "获取音频信息失败");
        avformat_close_input(&avFormatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // Locate the first audio stream.
    int idx_audio_stream = -1;
    for (unsigned int i = 0; i < avFormatContext->nb_streams; ++i) {
        if (avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            LOGE("找到音频 %d", AVMEDIA_TYPE_AUDIO);
            idx_audio_stream = (int) i;
            break;
        }
    }
    // FIX: the original fell through and dereferenced streams[-1] when the file
    // contained no audio stream.
    if (idx_audio_stream < 0) {
        LOGE("%s", "没有找到音频流");
        avformat_close_input(&avFormatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // Decoder context + decoder for that stream.
    AVCodecContext *avCodecContext = avFormatContext->streams[idx_audio_stream]->codec;
    AVCodec *avCodec = avcodec_find_decoder(avCodecContext->codec_id);
    // FIX: the original line was missing its ';' (compile error) and kept
    // executing after a failed avcodec_open2; also guard a NULL decoder.
    if (avCodec == NULL || avcodec_open2(avCodecContext, avCodec, NULL) < 0) {
        LOGE("%s", "打开解码器失败");
        avformat_close_input(&avFormatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // Resample whatever the decoder outputs to interleaved 16-bit stereo PCM,
    // keeping the source sample rate.
    int out_sample_rate = avCodecContext->sample_rate;
    SwrContext *swrContext = swr_alloc();
    swr_alloc_set_opts(swrContext,
                       AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, out_sample_rate,
                       avCodecContext->channel_layout, avCodecContext->sample_fmt,
                       avCodecContext->sample_rate, 0, NULL);
    swr_init(swrContext);

    int out_channel_nb = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);

    // Look up the Java callbacks and configure the AudioTrack.
    jclass audio_player = env->GetObjectClass(instance);
    jmethodID createAudio = env->GetMethodID(audio_player, "createAudio", "(II)V");
    jmethodID playTrack = env->GetMethodID(audio_player, "playTrack", "([BI)V");
    // FIX: the original hard-coded 44100 here while the resampler kept the
    // source rate, so any non-44.1 kHz file played at the wrong speed.
    env->CallVoidMethod(instance, createAudio, out_sample_rate, out_channel_nb);

    // FILE *pcm_file = fopen(output, "wb");

    // FIX: the packet from av_malloc() was used uninitialized; set defaults.
    AVPacket *avPacket = (AVPacket *) av_malloc(sizeof(AVPacket));
    av_init_packet(avPacket);
    avPacket->data = NULL;
    avPacket->size = 0;

    AVFrame *avFrame = av_frame_alloc();
    int got_frame = 0;

    // Output scratch buffer: samples * channels * bytes-per-sample (S16 = 2).
    // FIX: the original allocated 44100*2 BYTES but told swr_convert the buffer
    // held 44100*2 SAMPLES per channel (= 352800 bytes for stereo S16), which
    // allowed a heap overflow on large frames.
    const int max_out_samples = 44100;
    uint8_t *out_buffer = (uint8_t *) av_malloc(max_out_samples * out_channel_nb * 2);

    while (av_read_frame(avFormatContext, avPacket) >= 0) {
        if (avPacket->stream_index == idx_audio_stream) {
            // Decode one packet into avFrame.
            avcodec_decode_audio4(avCodecContext, avFrame, &got_frame, avPacket);
            if (got_frame) {
                LOGI("%s", "解码");
                // swr_convert returns the samples actually produced per channel.
                int out_samples = swr_convert(swrContext, &out_buffer, max_out_samples,
                                              (const uint8_t **) avFrame->data, avFrame->nb_samples);
                if (out_samples > 0) {
                    // FIX: size must come from the converter's output count, not
                    // the decoder's nb_samples (they differ when swr buffers).
                    int size = av_samples_get_buffer_size(NULL, out_channel_nb, out_samples,
                                                          AV_SAMPLE_FMT_S16, 1);
                    jbyteArray audio_sample_array = env->NewByteArray(size);
                    env->SetByteArrayRegion(audio_sample_array, 0, size, (const jbyte *) out_buffer);
                    // Hand the chunk to Java for playback.
                    env->CallVoidMethod(instance, playTrack, audio_sample_array, size);
                    env->DeleteLocalRef(audio_sample_array);
                    // fwrite(out_buffer, 1, size, pcm_file);
                }
            }
        }
        // FIX: moved out of the stream-index check — packets belonging to other
        // streams (video, subtitles, ...) leaked before.
        av_free_packet(avPacket);
    }

    // fclose(pcm_file);
    // FIX: out_buffer and the packet struct itself were never freed.
    av_free(out_buffer);
    av_free(avPacket);
    av_frame_free(&avFrame);
    swr_free(&swrContext);
    avcodec_close(avCodecContext);
    avformat_close_input(&avFormatContext);
    env->ReleaseStringUTFChars(input_, input);
    env->ReleaseStringUTFChars(output_, output);
}
4.MainActivity中添加一个按钮,点击后调用下面代码
// Build absolute paths for the source MP3 and the (reserved) PCM dump target.
final String input = new File(Environment.getExternalStorageDirectory(), "input.mp3").getAbsolutePath();
final String output = new File(Environment.getExternalStorageDirectory(), "output.pcm").getAbsolutePath();
// The native call blocks until decoding/playback finishes, so it must run
// off the UI thread.
Runnable decodeAndPlay = new Runnable() {
    @Override
    public void run() {
        new AudioPlayer().sound(input, output);
    }
};
new Thread(decodeAndPlay).start();