一、目標
1、使用vue.js作爲前端javascript框架,結合html5+ES6語法,在移動端Chrome瀏覽器中,完成兼容Android和iOS的錄音功能;
2、基於一般項目的訴求,封裝成可複用的錄音組件;
二、步驟
1、基於上篇博文(Vue.js實戰——封裝瀏覽器拍照組件_5)搭建的項目框架(geo_location6),複製粘貼生成geo_location7項目;
2、錄音的核心組件解析:
1)錄音組件核心代碼Recorder.js如下(生成wav格式的音頻文件,代碼裏面有比較詳盡的註釋):
export default class Recorder {
    /**
     * Records mono PCM audio from a MediaStream and encodes it as a WAV Blob.
     *
     * @param {MediaStream} stream - microphone stream obtained via getUserMedia
     * @param {Object} [config] - optional output settings
     * @param {number} [config.sampleBits=16] - output sample size in bits (8 or 16)
     * @param {number} [config.sampleRate=8000] - output sample rate in Hz
     */
    constructor(stream, config) {
        // Compatibility shims for older, vendor-prefixed browser APIs.
        window.URL = window.URL || window.webkitURL;
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
        config = config || {};
        config.sampleBits = config.sampleBits || 16; // output sample size: 8 or 16 bits
        config.sampleRate = config.sampleRate || 8000; // output sample rate in Hz
        // Prefer the standard AudioContext; fall back to the webkit-prefixed one.
        this.context = new (window.AudioContext || window.webkitAudioContext)();
        this.audioInput = this.context.createMediaStreamSource(stream);
        // NOTE(review): createScriptProcessor is deprecated in favour of
        // AudioWorklet, but remains the most widely supported option here.
        this.createScript = this.context.createScriptProcessor || this.context.createJavaScriptNode;
        // 4096-frame buffer, 1 input channel, 1 output channel (mono).
        this.recorder = this.createScript.apply(this.context, [4096, 1, 1]);
        this.audioData = {
            size: 0, // total number of captured samples
            buffer: [], // captured Float32Array chunks
            inputSampleRate: this.context.sampleRate, // rate the hardware delivers
            inputSampleBits: 16, // input sample size in bits
            outputSampleRate: config.sampleRate, // requested output rate
            outputSampleBits: config.sampleBits, // requested output bits (typo "oututSampleBits" fixed)
            // Append one chunk of raw samples from the processing callback.
            input: function (data) {
                this.buffer.push(new Float32Array(data));
                this.size += data.length;
            },
            // Merge all captured chunks and downsample by simple decimation.
            compress: function () {
                // Merge chunks into one contiguous Float32Array.
                let data = new Float32Array(this.size);
                let offset = 0;
                for (let i = 0; i < this.buffer.length; i++) {
                    data.set(this.buffer[i], offset);
                    offset += this.buffer[i].length;
                }
                // Decimation factor. Math.floor (not parseInt on a number) and
                // clamped to >= 1 so an output rate above the input rate
                // degrades gracefully to a pass-through instead of dividing by 0.
                let compression = Math.max(1, Math.floor(this.inputSampleRate / this.outputSampleRate));
                // Floor the length so the while-loop below never writes past
                // the end of the typed array when the ratio is fractional.
                let length = Math.floor(data.length / compression);
                let result = new Float32Array(length);
                let index = 0, j = 0;
                while (index < length) {
                    result[index] = data[j];
                    j += compression;
                    index++;
                }
                return result;
            },
            // Build a complete WAV (RIFF / linear PCM) file from the samples.
            encodeWAV: function () {
                let sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate);
                let sampleBits = Math.min(this.inputSampleBits, this.outputSampleBits);
                let bytes = this.compress();
                let dataLength = bytes.length * (sampleBits / 8);
                let buffer = new ArrayBuffer(44 + dataLength); // 44-byte RIFF header + PCM payload
                let data = new DataView(buffer);
                let channelCount = 1; // mono
                let offset = 0;
                let writeString = function (str) {
                    for (let i = 0; i < str.length; i++) {
                        data.setUint8(offset + i, str.charCodeAt(i));
                    }
                };
                // RIFF chunk descriptor
                writeString('RIFF');
                offset += 4;
                // Total file size minus the first 8 bytes
                data.setUint32(offset, 36 + dataLength, true);
                offset += 4;
                // RIFF type
                writeString('WAVE');
                offset += 4;
                // "fmt " sub-chunk identifier
                writeString('fmt ');
                offset += 4;
                // fmt chunk size: 16 for linear PCM
                data.setUint32(offset, 16, true);
                offset += 4;
                // Audio format: 1 = linear PCM
                data.setUint16(offset, 1, true);
                offset += 2;
                // Number of channels
                data.setUint16(offset, channelCount, true);
                offset += 2;
                // Sample rate (samples per second per channel)
                data.setUint32(offset, sampleRate, true);
                offset += 4;
                // Byte rate = channels * sampleRate * bytesPerSample
                data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true);
                offset += 4;
                // Block align = channels * bytesPerSample
                data.setUint16(offset, channelCount * (sampleBits / 8), true);
                offset += 2;
                // Bits per sample
                data.setUint16(offset, sampleBits, true);
                offset += 2;
                // "data" sub-chunk identifier
                writeString('data');
                offset += 4;
                // PCM payload length in bytes
                data.setUint32(offset, dataLength, true);
                offset += 4;
                // Write the sample data.
                if (sampleBits === 8) {
                    for (let i = 0; i < bytes.length; i++, offset++) {
                        // Clamp to [-1, 1], scale to 16-bit, then map into
                        // the unsigned 0-255 range used by 8-bit WAV.
                        let s = Math.max(-1, Math.min(1, bytes[i]));
                        let val = s < 0 ? s * 0x8000 : s * 0x7FFF;
                        val = Math.floor(255 / (65535 / (val + 32768)));
                        // 8-bit WAV samples are unsigned: setUint8 (the old
                        // setInt8 wrapped values > 127, and its extra
                        // littleEndian argument was meaningless for one byte).
                        data.setUint8(offset, val);
                    }
                } else {
                    for (let i = 0; i < bytes.length; i++, offset += 2) {
                        // Clamp to [-1, 1] and scale to signed 16-bit little-endian.
                        let s = Math.max(-1, Math.min(1, bytes[i]));
                        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
                    }
                }
                return new Blob([data], {type: 'audio/wav'});
            }
        };
    }
    // Start recording: wire the node graph and begin collecting samples.
    start() {
        this.audioInput.connect(this.recorder);
        this.recorder.connect(this.context.destination);
        let self = this;
        this.recorder.onaudioprocess = function (e) {
            self.audioData.input(e.inputBuffer.getChannelData(0));
        };
    }
    // Stop recording by detaching the processor node.
    stop() {
        this.recorder.disconnect();
    }
    // Stop and return the captured audio as a WAV Blob.
    getBlob() {
        this.stop();
        return this.audioData.encodeWAV();
    }
    // Play the captured audio through the given <audio> element.
    play(audio) {
        audio.src = window.URL.createObjectURL(this.getBlob());
    }
    // Discard any buffered recording data.
    clear() {
        this.audioData.buffer = [];
        this.audioData.size = 0;
    }
    // Log and throw a real Error (the original threw an ad-hoc object,
    // which loses the stack trace and instanceof checks).
    static throwError(message) {
        console.log("Error:" + message);
        throw new Error(message);
    }
    // True when some form of getUserMedia is available.
    static canRecording() {
        // Install the vendor-prefix shim here as well: this static runs
        // BEFORE any constructor has had a chance to install it, so the
        // original check wrongly reported "unsupported" on prefixed browsers.
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
        return (navigator.getUserMedia != null);
    }
    /**
     * Request microphone access and hand a ready Recorder to `callback`.
     * @param {function(Recorder)} callback - receives the Recorder on success
     * @param {Object} [config] - forwarded to the Recorder constructor
     */
    static get(callback, config) {
        if (callback) {
            if (Recorder.canRecording()) {
                navigator.getUserMedia(
                    {audio: true}, // audio only
                    function (stream) {
                        let rec = new Recorder(stream, config);
                        callback(rec);
                    },
                    function (error) {
                        switch (error.code || error.name) {
                            case 'PERMISSION_DENIED':
                            case 'PermissionDeniedError':
                                Recorder.throwError('用戶拒絕提供信息。');
                                break;
                            case 'NOT_SUPPORTED_ERROR':
                            case 'NotSupportedError':
                                Recorder.throwError('瀏覽器不支持硬件設備。');
                                break;
                            case 'MANDATORY_UNSATISFIED_ERROR':
                            case 'MandatoryUnsatisfiedError':
                                Recorder.throwError('無法發現指定的硬件設備。');
                                break;
                            default:
                                Recorder.throwError('無法打開麥克風。異常信息:' + (error.code || error.name));
                                break;
                        }
                    });
            } else {
                Recorder.throwError('當前瀏覽器不支持錄音功能。');
            }
        }
    }
}
2)注意點:
a、音頻格式有很多種,常見的有wav/mp3/amr等。其中部分是專有版權,比如mp3。wav是微軟制定的標準,amr是微信內置語音的音頻格式。網上比較常見的是使用wav和mp3格式處理、轉換的邏輯代碼,比如上述核心代碼就是生成一個wav音頻文件。但是amr具有文件特別小的優點。如果系統後臺要同時處理原生web端和微信平臺生成的音頻文件,上述代碼可以考慮改成直接生成amr音頻格式(留待你去實現)。我們項目目前採用的是前端生成wav,然後後臺識別是amr還是wav,如果是wav,則解碼成amr再進行業務處理;
b、音頻協議非常複雜,每個格式都有專門的協議,目前有個FFmpeg開源框架(軟件包)對各個音頻格式進行轉換,可以作爲驗證工具在項目中使用。實際考慮到性能和安全性,均需要通過解析代碼實現,而不是通過shell命令調用FFmpeg來實現;
c、採樣位數(8/16,指每個採樣點的量化位深;注意它與單聲道/雙聲道的通道數是兩個不同的參數,本例代碼固定爲單聲道)和採樣率很重要,關係到最後能不能通過前輩們貢獻的代碼來轉換和編解碼。目前我只看到有解析雙聲道和採樣率爲8000的wav音頻的C語言代碼。但是這種採樣位數和採樣率的wav音頻文件較大,網絡傳輸比較佔流量,需要做好取捨。
3)核心錄音組件封裝比較完美,但是代碼裏面有太多的協議相關的實現細節,對外提供時,API不易被理解,所以再封裝了一層。
record-sdk.js代碼如下:
import Recorder from "./Recorder";
/**
 * Thin facade over the Recorder component, exposing only the three
 * operations callers need: start, stop (which yields the audio Blob),
 * and playback. Callback-style `param` objects carry `success`/`error`.
 */
export default class Record {
    // Acquire the microphone and begin recording.
    startRecord(param) {
        try {
            Recorder.get((rec) => {
                console.log("init recorder component now.");
                this.recorder = rec;
                this.recorder.start();
                console.log("start record now.");
                param.success("record successfully!");
            });
        } catch (e) {
            param.error("record failed!" + e);
        }
    }
    // Stop recording; the success callback receives the WAV Blob.
    stopRecord(param) {
        console.log("stop record now.");
        try {
            const blobData = this.recorder.getBlob();
            param.success(blobData);
        } catch (e) {
            param.error("record stop failed!" + e);
        }
    }
    // Play the last recording through the supplied <audio> element.
    play(audio) {
        console.log("start play record now.");
        try {
            this.recorder.play(audio);
            console.log("play successfully.");
        } catch (e) {
            console.log("play record failed!" + e);
        }
    }
}
只暴露錄音,停止錄音和播放錄音3個基本功能。其中停止錄音時,可以獲取對應的音頻文件對象。
3、編寫錄音的功能代碼。需要新增文件清單:
src/pages/record.html
src/components/Record.vue
src/js/record.js
src/commons/record-sdk.js
src/commons/Recorder.js
並同時修改webpack.config.js文件以集成錄音功能代碼。源碼詳見:geo_location7,此處僅列舉下關鍵源碼:
1)src/components/Record.vue關鍵代碼如下:
<template>
<!-- Recorder UI: record / stop / play buttons plus an HTML5 audio player.
     The player area is toggled with v-show (not v-if) so the <audio>
     element is always present in the DOM and can be found by
     document.querySelector on the very first click of "play". -->
<div class="record">
<h1>Click following button to record voice:</h1>
<input @click="startRecord" type="button" value="錄音">
<input @click="stopRecord" type="button" value="停止">
<input @click="play" type="button" value="播放">
<div class="record-play" v-show="isFinished">
<h2>Current voice player is:</h2>
<audio controls autoplay></audio>
</div>
</div>
</template>
<script>
import Record from "../commons/record-sdk";
export default {
name: "Record",
data() {
return {
// Controls visibility of the player area (true after "play" is clicked).
isFinished: false,
// Cached reference to the <audio> element; filled in on first play.
audio: "",
// Recording SDK facade shared across the component's lifetime.
recorder: new Record()
};
},
methods: {
// Begin a new recording and hide the player from any previous take.
startRecord: function() {
console.log("start to record now.");
let self = this;
self.isFinished = false;
self.recorder.startRecord({
success: res => {
console.log("start record successfully.");
},
error: res => {
console.log("start record failed.");
}
});
},
// Stop recording; `res` in the success callback is the audio Blob.
stopRecord: function() {
console.log("stop record now.");
let self = this;
self.isFinished = false;
self.recorder.stopRecord({
success: res => {
// The audio source file (res) is available here, e.g. for uploading.
console.log("stop record successfully.");
},
error: res => {
console.log("stop record failed.");
}
});
},
// Reveal the player and play back the last recording.
play: function() {
console.log("play record now.");
let self = this;
self.isFinished = true;
self.audio = document.querySelector("audio");
self.recorder.play(self.audio);
}
}
};
</script>
其中,<audio controls autoplay></audio>是添加了一個html5原生的音頻播放器。
4、全部代碼編寫完畢後,執行命令npm run dev開始調試運行效果,主界面如下:
5、點擊”錄音”按鈕,瀏覽器會出現如下彈框:
6、點擊”允許”按鈕,授權獲取瀏覽器錄音權限,會出現如下正在錄音的標誌。
7、隨便說幾句話,點擊“停止”按鈕,停止錄音。再點擊"播放"按鈕。會出現音頻播放器,並同時播放你剛纔錄製的音頻。
三、總結
1、音頻處理尤其複雜,建議儘量用已有的開源庫和前輩的經驗去處理,不要重複造輪子;
2、要注意vue.js中的v-if和v-show的區別,v-show相當於標籤的display:none,表明div內的所有組件已經加載過了,只是不可見,而v-if後面的布爾值變化時,會重新加載生成dom樹。本例中點擊播放時,需要顯示播放器,並立即播放,如果使用v-if則會出現第一次點擊播放時,裏面的audio組件還沒初始化完成,導致找不到audio組件而報錯。
四、參考資料
[1] https://www.cnblogs.com/blqw/p/3782420.html
[2] https://blog.csdn.net/bzhou0125/article/details/46444201
[3]https://blog.csdn.net/wulinbanxia/article/details/73521325