1、接口函數:與本地播放文件的接口類似,兩個重載的主要區別在於傳入的是文件名還是流(InStream)。
其餘傳入參數包括:是否循環播放、是否與 mic 採集混音、文件使用的編解碼格式,以及音量增益控制。
// Plays a media file into the send path of |channel|, either mixed with or
// replacing the microphone capture.
// channel == -1 applies to all sending channels via the transmit mixer;
// any other value affects only that channel (see implementation below).
// |fileNameUTF8| is the file path; its 1024-byte bound matches
// FileWrapper::kMaxFileNameSize (checked by a static_assert in the
// implementation).  |loop| restarts the file when it ends;
// |mixWithMicrophone| mixes with the captured signal instead of replacing
// it; |format| selects the file's codec/container; |volumeScaling| scales
// the file signal (1.0 = unchanged).  Returns 0 on success, -1 on failure.
int StartPlayingFileAsMicrophone(int channel,
const char fileNameUTF8[1024],
bool loop = false,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) override;
// Overload reading the media from a caller-owned |stream| instead of a file
// path.  NOTE(review): there is no |loop| parameter here, so looping is
// presumably unsupported for streams — confirm against the implementation.
int StartPlayingFileAsMicrophone(int channel,
InStream* stream,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) override;
2、根據 channel 的數值:如果為 -1,則接口函數調用 transmit_mixer 的接口,對全部發送通道進行混音或者替換;
如果為其他數值,則只混音或者替換對應通道的 mic 採集。
// Starts decoding |fileNameUTF8| and feeding it into the send path, either
// mixed with or replacing the microphone capture (|mixWithMicrophone|).
//
// channel == -1: route through the transmit mixer so every sending channel
//                is affected.
// channel >= 0:  affect only that channel, after demultiplexing.
//
// Returns 0 on success; sets the engine's last error and returns -1 on
// failure.
int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
                                              const char fileNameUTF8[1024],
                                              bool loop,
                                              bool mixWithMicrophone,
                                              FileFormats format,
                                              float volumeScaling) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
               "loop=%d, mixWithMicrophone=%d, format=%d, "
               "volumeScaling=%5.3f)",
               channel, fileNameUTF8, loop, mixWithMicrophone, format,
               volumeScaling);
  // The [1024] in the parameter type must stay in sync with the file-name
  // limit used by FileWrapper.
  static_assert(1024 == FileWrapper::kMaxFileNameSize, "");
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  // Play the whole file: no explicit start/stop offsets.
  const uint32_t startPointMs(0);
  const uint32_t stopPointMs(0);
  if (channel == -1) {
    // Affect all sending channels via the transmit mixer.
    int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
        fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
        nullptr);
    if (res) {
      WEBRTC_TRACE(
          kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
          "StartPlayingFileAsMicrophone() failed to start playing file");
      return -1;
    }
    _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
    return 0;
  }
  // Add file after demultiplexing <=> affects one channel only.
  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
  voe::Channel* channelPtr = ch.channel();
  if (channelPtr == nullptr) {
    _shared->SetLastError(
        VE_CHANNEL_NOT_VALID, kTraceError,
        "StartPlayingFileAsMicrophone() failed to locate channel");
    return -1;
  }
  int res = channelPtr->StartPlayingFileAsMicrophone(
      fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
      nullptr);
  if (res) {
    WEBRTC_TRACE(
        kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
        "StartPlayingFileAsMicrophone() failed to start playing file");
    return -1;
  }
  channelPtr->SetMixWithMicStatus(mixWithMicrophone);
  return 0;
}
3、接下來會創建 FilePlayer 對象,用來解碼對應的文件或者流。
int Channel::StartPlayingFileAsMicrophone(const char* fileName,
bool loop,
FileFormats format,
int startPosition,
float volumeScaling,
int stopPosition,
const CodecInst* codecInst) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, "
"loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, "
"stopPosition=%d)",
fileName, loop, format, volumeScaling, startPosition,
stopPosition);
rtc::CritScope cs(&_fileCritSect);
if (channel_state_.Get().input_file_playing) {
_engineStatisticsPtr->SetLastError(
VE_ALREADY_PLAYING, kTraceWarning,
"StartPlayingFileAsMicrophone() filePlayer is playing");
return 0;
}
// Destroy the old instance
if (input_file_player_) {
input_file_player_->RegisterModuleFileCallback(NULL);
input_file_player_.reset();
}
// Create the instance
input_file_player_ = FilePlayer::CreateFilePlayer(_inputFilePlayerId,
(const FileFormats)format);
if (!input_file_player_) {
_engineStatisticsPtr->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"StartPlayingFileAsMicrophone() filePlayer format isnot correct");
return -1;
}
const uint32_t notificationTime(0);
if (input_file_player_->StartPlayingFile(
fileName, loop, startPosition, volumeScaling, notificationTime,
stopPosition, (const CodecInst*)codecInst) != 0) {
_engineStatisticsPtr->SetLastError(
VE_BAD_FILE, kTraceError,
"StartPlayingFile() failed to start file playout");
input_file_player_->StopPlayingFile();
input_file_player_.reset();
return -1;
}
input_file_player_->RegisterModuleFileCallback(this);
channel_state_.SetInputFilePlaying(true);
return 0;
}
4、得到解碼後的 PCM 數據:
// TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
// a shared helper.
// Pulls 10 ms of decoded file audio from |input_file_player_| and, depending
// on |_mixFileWithMicrophone|, either mixes it into or substitutes it for
// the captured audio in |_audioFrame|.
// Returns 0 on success or when the file has ended; -1 if no player exists or
// decoding fails.
int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) {
  // Buffer for one 10 ms chunk of mono file audio.  NOTE(review): 640
  // samples is assumed to cover 10 ms at the highest |mixingFrequency| the
  // player produces — confirm against FilePlayer::Get10msAudioFromFile.
  constexpr size_t kMaxFileSamplesPer10ms = 640;
  std::unique_ptr<int16_t[]> fileBuffer(new int16_t[kMaxFileSamplesPer10ms]);
  size_t fileSamples(0);
  {
    // Only the file-player access happens under the lock; the mixing below
    // operates on the local buffer.
    rtc::CritScope cs(&_fileCritSect);
    if (!input_file_player_) {
      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
                   "Channel::MixOrReplaceAudioWithFile() fileplayer"
                   " doesnt exist");
      return -1;
    }
    if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
                                                 mixingFrequency) == -1) {
      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
                   "Channel::MixOrReplaceAudioWithFile() file mixing "
                   "failed");
      return -1;
    }
    // End of file: leave the captured frame untouched.
    if (fileSamples == 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
                   "Channel::MixOrReplaceAudioWithFile() file is ended");
      return 0;
    }
  }
  // The player resampled to |mixingFrequency|, so sample counts must match.
  assert(_audioFrame.samples_per_channel_ == fileSamples);
  if (_mixFileWithMicrophone) {
    // Currently file stream is always mono.
    // TODO(xians): Change the code when FilePlayer supports real stereo.
    MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(),
               1, fileSamples);
  } else {
    // Replace ACM audio with file.
    // Currently file stream is always mono.
    // TODO(xians): Change the code when FilePlayer supports real stereo.
    _audioFrame.UpdateFrame(
        _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
        AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
  }
  return 0;
}