/**
 * Concatenate a WAV header and an audio data chunk into one ArrayBuffer.
 *
 * @param {ArrayBuffer} header - The (typically 44-byte) WAV header.
 * @param {ArrayBuffer} chunk - Raw audio sample data to append after the header.
 * @returns {ArrayBuffer} A new buffer containing header bytes followed by chunk bytes.
 */
function combineHeaderAndChunk(header, chunk) {
  // Create a new ArrayBuffer to hold both the header and the chunk.
  const combinedBuffer = new ArrayBuffer(header.byteLength + chunk.byteLength);
  // Create a Uint8Array view of the combined buffer.
  const combinedView = new Uint8Array(combinedBuffer);
  // Copy the header into the combined buffer.
  combinedView.set(new Uint8Array(header), 0);
  // Copy the chunk data after the header.
  combinedView.set(new Uint8Array(chunk), header.byteLength);
  return combinedBuffer;
}
// Usage example: cache the WAV header from the first chunk, then prepend it
// to every subsequent chunk so each piece decodes as a standalone WAV file.
let storedHeader = null;
let isFirstChunk = true;

ws.onmessage = function (event) {
  if (isFirstChunk) {
    // Assume the first 44 bytes are the standard WAV header.
    storedHeader = event.data.slice(0, 44);
    // const headerInfo = parseWavHeader(storedHeader);
    // console.log("WAV Header Info:", headerInfo);
    // Handle the rest of the first chunk as audio data.
    const firstChunkData = event.data.slice(44);
    const combinedData = combineHeaderAndChunk(storedHeader, firstChunkData);
    processAudioData(combinedData);
    isFirstChunk = false;
  } else {
    // For subsequent chunks, combine with the stored header.
    const combinedData = combineHeaderAndChunk(storedHeader, event.data);
    processAudioData(combinedData);
  }
};
/**
 * Placeholder sink for a complete WAV ArrayBuffer (header + chunk data).
 * Intentionally empty in this example; see the commented sketch below.
 *
 * @param {ArrayBuffer} audioData - A decodable WAV buffer.
 * @returns {undefined}
 */
function processAudioData(audioData) {
  // Here you would typically send the data to the Web Audio API.
  // For example:
  // audioContext.decodeAudioData(audioData)
  //   .then(decodedData => {
  //     // Use the decoded audio data
  //   })
  //   .catch(error => console.error("Error decoding audio data:", error));
}
支持 AudioBuffer.copyToChannel
playAudioBufferQueue() { if(this.data.playing || this.data.audioBufferQueue.length === 0) { return } if (this.data.audioBufferQueue.length > 0) { this.setData({ status: '正在回复', playing: true }) const currentPlayData = this.data.audioBufferQueue.shift() const source = this.createFadeInOutNode(currentPlayData, 0.018) source.start() source.onended = () => { this.setData({ playing: false }) this.playAudioBufferQueue() if(this.data.audioBufferQueue.length === 0) { this.setData({ status: '正在聆听', playing: false }) }) } } } }, createFadeInOutNode(buffer, fadeDuration) { const source = this.data.audioContext.createBufferSource() source.buffer = buffer const gainNode = this.data.audioContext.createGain() source.connect(gainNode) gainNode.connect(this.data.audioContext.destination) // 添加淡入效果 gainNode.gain.setValueAtTime(0, this.data.audioContext.currentTime) gainNode.gain.linearRampToValueAtTime(1, this.data.audioContext.currentTime + fadeDuration) // 添加淡出效果 gainNode.gain.setValueAtTime(1, this.data.audioContext.currentTime + buffer.duration - fadeDuration) gainNode.gain.linearRampToValueAtTime(0, this.data.audioContext.currentTime + buffer.duration) return source },
我尝试了一下自己拼装,倒是能播放了,就是来一个chunk 就拼起来, 格式是 wav 的,所以播放时都要在 chunk 前拼上 wav 的header
可参考代码
/**
 * Concatenate a WAV header and an audio chunk into one ArrayBuffer.
 * (TypeScript annotations from the original paste removed — invalid in plain JS.)
 *
 * @param {ArrayBuffer} header
 * @param {ArrayBuffer} chunk
 * @returns {ArrayBuffer} header bytes followed by chunk bytes.
 */
function combineHeaderAndChunk(header, chunk) {
  // Create a new ArrayBuffer to hold both the header and the chunk.
  const combinedBuffer = new ArrayBuffer(header.byteLength + chunk.byteLength);
  // Create a Uint8Array view of the combined buffer.
  const combinedView = new Uint8Array(combinedBuffer);
  // Copy the header into the combined buffer.
  combinedView.set(new Uint8Array(header), 0);
  // Copy the chunk data after the header.
  combinedView.set(new Uint8Array(chunk), header.byteLength);
  return combinedBuffer;
}

// Usage example: keep the first 44 bytes as the WAV header and prepend it
// to every later chunk so each one decodes independently.
let storedHeader = null;
let isFirstChunk = true;

ws.onmessage = function (event) {
  if (isFirstChunk) {
    // Assume the first 44 bytes are the header.
    storedHeader = event.data.slice(0, 44);
    // const headerInfo = parseWavHeader(storedHeader);
    // console.log("WAV Header Info:", headerInfo);
    // Handle the rest of the first chunk as audio data.
    const firstChunkData = event.data.slice(44);
    const combinedData = combineHeaderAndChunk(storedHeader, firstChunkData);
    processAudioData(combinedData);
    isFirstChunk = false;
  } else {
    // For subsequent chunks, combine with the stored header.
    const combinedData = combineHeaderAndChunk(storedHeader, event.data);
    processAudioData(combinedData);
  }
};

function processAudioData(audioData) {
  // Here you would typically send the data to the Web Audio API.
  // For example:
  // audioContext.decodeAudioData(audioData)
  //   .then(decodedData => {
  //     // Use the decoded audio data
  //   })
  //   .catch(error => console.error("Error decoding audio data:", error));
}
请问解决了吗,可以分享一下实现方案吗
请问解决了吗?有相关示例代码方便提供吗?
// Decode a WAV/compressed buffer with the mini-program WebAudio context and
// play it through the default output once decoding succeeds.
const audioCtx = wx.createWebAudioContext();
const source = audioCtx.createBufferSource();
audioCtx.decodeAudioData(data, (buffer) => {
  source.buffer = buffer;
  source.connect(audioCtx.destination);
  source.start();
});
我尝试后续在 source.buffer 中动态添加数据,但并未成功