
Does AudioBuffer support dynamically appending buffer data? If not, how can stuttering be avoided when playing audio in real time?

 WebAudioContext.createBuffer


6 answers

  • 社区技术运营专员--Demons
    2024-01-18

    Yes, it's supported: see AudioBuffer.copyToChannel.

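    For reference, a minimal sketch of writing PCM into an AudioBuffer with copyToChannel (the names and buffer size here are illustrative; whether writes made after playback has started become audible can vary by platform):

    const audioCtx = wx.createWebAudioContext();
    // Pre-allocate a 10-second mono buffer at the context's sample rate
    const pcmBuf = audioCtx.createBuffer(1, audioCtx.sampleRate * 10, audioCtx.sampleRate);
    let writeOffset = 0;

    // Append a Float32Array chunk of samples into channel 0
    function appendPcm(float32Chunk) {
      pcmBuf.copyToChannel(float32Chunk, 0, writeOffset);
      writeOffset += float32Chunk.length;
    }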
  • 曹程
    02-10
    My code plays smoothly. The only issue is that the WeChat Mini Program still exposes the deprecated ScriptProcessorNode interface and has not implemented the newer browser AudioWorkletNode interface (a sketch of the worklet equivalent follows the code below); we'll have to wait for WeChat to catch up. This excerpt references gainNode, audioContext, BUFFER_SIZE, etc., which are defined in the full source posted in the replies.
    function initWebAudio() {
        try {
            scriptProcessorNode = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);
            scriptProcessorNode.connect(gainNode); 
            scriptProcessorNode.onaudioprocess = (audioProcessingEvent) => {
                //  console.log("onaudioprocess triggered");
                const outputBuffer = audioProcessingEvent.outputBuffer;
                const outputData = outputBuffer.getChannelData(0);
                if (pcmBuffer.length >= BUFFER_SIZE) {
                    for (let i = 0; i < BUFFER_SIZE; i++) {
                        outputData[i] = pcmBuffer[i];
                    }
                    pcmBuffer = pcmBuffer.slice(BUFFER_SIZE);
                } else {
                    for (let i = 0; i < BUFFER_SIZE; i++) {
                        outputData[i] = 0;
                    }
                }        
             };
            audioContext.resume().then(() => {
                console.log("AudioContext resumed.");
            }).catch((err) => {
                console.error("Failed to resume AudioContext:", err);
            });
            console.log("Web Audio initialized successfully.");
        } catch (err) {
            console.error("Failed to initialize Web Audio:", err);
        }
    }
    // Receive G.711 data and decode it
    async function play(data, type) {
        if (type === 1) {
            const pcmData = new Int16Array(data.length);
            for (let i = 0; i < data.length; i++) {
                pcmData[i] = g711Codec.alaw2linear(data[i]);
            }
            const float32Data = new Float32Array(pcmData.length);
            for (let i = 0; i < pcmData.length; i++) {
                float32Data[i] = Math.max(-1, Math.min(1, pcmData[i] / 32768.0));
            }
            // Sample-rate conversion
            const resampledData = resamplePCM(float32Data, SAMPLE_RATE, audioContext.sampleRate);
            // Append to the pending PCM buffer
            const newPcmBuffer = new Float32Array(pcmBuffer.length + resampledData.length);
            newPcmBuffer.set(pcmBuffer, 0);
            newPcmBuffer.set(resampledData, pcmBuffer.length);
            pcmBuffer = newPcmBuffer;
        }
    }
    
    function resamplePCM(input, inputSampleRate, outputSampleRate) {
        const ratio = outputSampleRate / inputSampleRate;
        const outputLength = Math.round(input.length * ratio);
        const output = new Float32Array(outputLength);
        for (let i = 0; i < outputLength; i++) {
            const originalIndex = i / ratio;
            const lowerIndex = Math.floor(originalIndex);
            const upperIndex = Math.min(Math.ceil(originalIndex), input.length - 1);
            const weight = originalIndex - lowerIndex;
            output[i] = input[lowerIndex] * (1 - weight) + input[upperIndex] * weight;
        }
        return output;
    }
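
    By contrast, in a standard browser the same pull-based playback could be built on AudioWorkletNode, which is what ScriptProcessorNode was deprecated in favor of. A rough sketch (not runnable in the Mini Program, as noted above):

    // pcm-player-processor.js (loaded via audioContext.audioWorklet.addModule)
    class PcmPlayerProcessor extends AudioWorkletProcessor {
      constructor() {
        super();
        this.queue = [];
        // PCM chunks arrive from the main thread over the message port
        this.port.onmessage = (e) => this.queue.push(e.data);
      }
      process(inputs, outputs) {
        const out = outputs[0][0]; // mono output, 128 frames per callback
        let filled = 0;
        while (filled < out.length && this.queue.length > 0) {
          const chunk = this.queue[0];
          const n = Math.min(out.length - filled, chunk.length);
          out.set(chunk.subarray(0, n), filled);
          filled += n;
          if (n === chunk.length) this.queue.shift();
          else this.queue[0] = chunk.subarray(n);
        }
        return true; // keep the processor alive; unfilled samples stay silent
      }
    }
    registerProcessor('pcm-player', PcmPlayerProcessor);

    // Main thread:
    // await audioContext.audioWorklet.addModule('pcm-player-processor.js');
    // const node = new AudioWorkletNode(audioContext, 'pcm-player');
    // node.connect(audioContext.destination);
    // node.port.postMessage(resampledFloat32Chunk);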
    


    • 🧸
      02-19
      Could you share the full source? I've run into the same problem.
    • 曹程
      02-27
      import * as g711 from './audioG711';

      const recorderManager = wx.getRecorderManager();
      // Initialize the audio context
      // const gainNode = audioContext.createGain();
      // gainNode.connect(audioContext.destination);
      const g711Codec = new g711.G711Codec();
      class AudioRecorder {
        constructor(codec) {
          this.codec = codec;
          this.frameQueue = [];
          this.resolveNextFrame = null;
          this.g711Codec = g711Codec; // use the shared global instance
          this.initRecorder();
        }
        initRecorder() {
          recorderManager.onStart(() => {
            console.log('recorder start');
          });
          recorderManager.onFrameRecorded((res) => {
            if (res.frameBuffer) {
             // console.log('getNextAudioFrame', res.frameBuffer);
              this.frameQueue.push(res.frameBuffer);
              if (this.resolveNextFrame) {
                this.resolveNextFrame(this.frameQueue.shift());
                this.resolveNextFrame = null;
              }
            }
          });
        }
        async getNextAudioFrame() {
          let frame;
          if (this.frameQueue.length > 0) {
            frame = this.frameQueue.shift();
          } else {
            frame = await new Promise((resolve) => {
              this.resolveNextFrame = resolve;
            });
          }
          if (this.codec === 'g711') {
            const encoded = this.g711Codec.encode(new Int16Array(frame));
            //console.log('getNextAudioFrame', frame ,'encoded:', encoded);
            // if (encoded.length >= 512) {
            // return encoded.slice(0, 512);
            // }
            return encoded;
          }
          return frame;
        }
        start() {
          recorderManager.start({
            format: 'PCM',
            sampleRate: 8000,
            encodeBitRate: 16000,
            numberOfChannels: 1,
            frameSize: 1,
          });
        }
        stop() {
          recorderManager.stop();
          this.frameQueue = [];
          if (this.resolveNextFrame) {
            this.resolveNextFrame(null);
          }
        }
      }
      function startRecording(codec) {
        return new Promise((resolve) => {
          const recorder = new AudioRecorder(codec);
          recorder.start();
          resolve(recorder);
        });
      }
      function stopRecording(recorder) {
        recorder.stop();
      }
      module.exports = {
        startRecording,
        stopRecording,
      };
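
      Usage from a page might look roughly like this (a hypothetical caller; it assumes the module above is saved as audioRecorder.js and that encoded frames are typed arrays):

      const { startRecording, stopRecording } = require('./audioRecorder');

      async function demo() {
        const recorder = await startRecording('g711');
        // Pull encoded G.711 frames as the recorder produces them
        for (let i = 0; i < 10; i++) {
          const frame = await recorder.getNextAudioFrame();
          if (!frame) break; // stop() resolves any pending frame with null
          console.log('got frame,', frame.length, 'samples');
        }
        stopRecording(recorder);
      }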
    • 曹程
      02-27, replying to 🧸
      // audioPlayer.js
      const webAudioContext = wx.createWebAudioContext();
      // Audio stream parameters
      const SAMPLE_RATE = 8000;
      // Prefer a smaller BUFFER_SIZE to reduce latency
      // #if MP
      const BUFFER_SIZE = 1024;
      // #elif IOS
      const BUFFER_SIZE = 2048;
      // #elif ANDROID
      const BUFFER_SIZE = 4096;
      // #endif
      // WebAudio resources
      const audioContext = webAudioContext;
      const gainNode = audioContext.createGain();
      gainNode.connect(audioContext.destination);
      gainNode.gain.value = 2.0;
      //gainNode.gain.value = 1.0;
      let scriptProcessorNode = null;
      // G.711 codec
      import * as g711 from './audioG711';
      const g711Codec = new g711.G711Codec();
      // Holds PCM samples that have not been played yet
      let pcmBuffer = new Float32Array(0);
      // Web Audio initialization (with debug logging)
      function initWebAudio() {
          try {
              scriptProcessorNode = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);
              scriptProcessorNode.connect(gainNode);
              scriptProcessorNode.onaudioprocess = (audioProcessingEvent) => {
                  // console.log("onaudioprocess triggered");
                  const outputBuffer = audioProcessingEvent.outputBuffer;
                  const outputData = outputBuffer.getChannelData(0);
                  if (pcmBuffer.length >= BUFFER_SIZE) {
                      for (let i = 0; i < BUFFER_SIZE; i++) {
                          outputData[i] = pcmBuffer[i];
                      }
                      pcmBuffer = pcmBuffer.slice(BUFFER_SIZE);
                  } else {
                      for (let i = 0; i < BUFFER_SIZE; i++) {
                          outputData[i] = 0;
                      }
                  }
              };
              audioContext.resume().then(() => {
                  console.log("AudioContext resumed.");
              }).catch((err) => {
                  console.error("Failed to resume AudioContext:", err);
              });
              // Listen for state changes
              audioContext.onstatechange = () => {
                  console.log('AudioContext state changed to:', audioContext.state);
                  if (audioContext.state === 'suspended') {
                      console.log('AudioContext is suspended. Possibly due to backgrounding.');
                      pcmBuffer = new Float32Array(0);
                      console.log('pcmBuffer cleared.');
                      // Handle the suspended case here,
                      // e.g. save state, clear data, stop playback
                  } else if (audioContext.state === 'running') {
                      console.log('AudioContext is running. Resetting pcmBuffer');
                      // Handle the resumed case here,
                      // e.g. resume playback, reload data
                      // pcmBuffer = new Float32Array(0);
                      // console.log('pcmBuffer cleared.');
                  }
              };
              console.log("Web Audio initialized successfully.");
          } catch (err) {
              console.error("Failed to initialize Web Audio:", err);
          }
      }
      // Receive G.711 data and decode it
      async function play(data, type) {
          if (type === 1) {
              const pcmData = new Int16Array(data.length);
              for (let i = 0; i < data.length; i++) {
                  pcmData[i] = g711Codec.alaw2linear(data[i]);
              }
              const float32Data = new Float32Array(pcmData.length);
              for (let i = 0; i < pcmData.length; i++) {
                  float32Data[i] = Math.max(-1, Math.min(1, pcmData[i] / 32768.0));
              }
              // Sample-rate conversion
              const resampledData = resamplePCM(float32Data, SAMPLE_RATE, audioContext.sampleRate);
              // Append to the pending PCM buffer
              const newPcmBuffer = new Float32Array(pcmBuffer.length + resampledData.length);
              newPcmBuffer.set(pcmBuffer, 0);
              newPcmBuffer.set(resampledData, pcmBuffer.length);
              pcmBuffer = newPcmBuffer;
          }
      }
      function resamplePCM(input, inputSampleRate, outputSampleRate) {
          const ratio = outputSampleRate / inputSampleRate;
          const outputLength = Math.round(input.length * ratio);
          const output = new Float32Array(outputLength);
          for (let i = 0; i < outputLength; i++) {
              const originalIndex = i / ratio;
              const lowerIndex = Math.floor(originalIndex);
              const upperIndex = Math.min(Math.ceil(originalIndex), input.length - 1);
              const weight = originalIndex - lowerIndex;
              output[i] = input[lowerIndex] * (1 - weight) + input[upperIndex] * weight;
          }
          return output;
      }
      module.exports = {
          initWebAudio,
          play,
      };
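
      And a rough caller-side sketch (hypothetical: some transport such as a WebSocket delivering raw G.711 A-law bytes, with type 1 meaning A-law as in play() above):

      const { initWebAudio, play } = require('./audioPlayer');

      initWebAudio();
      // socketTask: an established wx.connectSocket connection (assumed)
      socketTask.onMessage((res) => {
        // res.data: raw G.711 A-law bytes from the server
        play(new Uint8Array(res.data), 1);
      });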
    • 🧸
      🧸
      03-03
      Thanks!
  • \n
    2024-11-04
    playAudioBufferQueue() {
        if (this.data.playing || this.data.audioBufferQueue.length === 0) {
          return
        }
        this.setData({ status: '正在回复', playing: true })
        const currentPlayData = this.data.audioBufferQueue.shift()
        const source = this.createFadeInOutNode(currentPlayData, 0.018)
        source.start()
        source.onended = () => {
          this.setData({ playing: false })
          // Play the next buffer in the queue, if any
          this.playAudioBufferQueue()
          if (this.data.audioBufferQueue.length === 0) {
            this.setData({ status: '正在聆听', playing: false })
          }
        }
      },
    
    createFadeInOutNode(buffer, fadeDuration) {
        const source = this.data.audioContext.createBufferSource()
        source.buffer = buffer
        const gainNode = this.data.audioContext.createGain()
        source.connect(gainNode)
        gainNode.connect(this.data.audioContext.destination)
        // Fade in
        gainNode.gain.setValueAtTime(0, this.data.audioContext.currentTime)
        gainNode.gain.linearRampToValueAtTime(1, this.data.audioContext.currentTime + fadeDuration)
        // Fade out
        gainNode.gain.setValueAtTime(1, this.data.audioContext.currentTime + buffer.duration - fadeDuration)
        gainNode.gain.linearRampToValueAtTime(0, this.data.audioContext.currentTime + buffer.duration)
        return source
      },
    
    • 土豆思思•﹏•
      2024-11-05
      There's still a stutter between two buffers; it's not really smooth.
    • tinywaves
      2024-11-05, replying to 土豆思思•﹏•
      Following for updates.
    • \n
      2024-11-06, replying to 土豆思思•﹏•
      1. Tune the fadeDuration value; 2. check whether the server is cutting the returned data into chunks too crudely. (See the scheduling sketch below for another way to remove the seam.)
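
    A way to remove the seam between consecutive buffers entirely is to schedule each source at an exact time on the AudioContext clock instead of chaining playback off onended. A minimal sketch, assuming decoded AudioBuffer objects and the same audioContext as above:

    let nextStartTime = 0

    function scheduleBuffer(audioContext, buffer) {
      const source = audioContext.createBufferSource()
      source.buffer = buffer
      source.connect(audioContext.destination)
      // Start exactly where the previous buffer ends rather than waiting for onended
      const now = audioContext.currentTime
      if (nextStartTime < now) nextStartTime = now + 0.02 // small safety margin
      source.start(nextStartTime)
      nextStartTime += buffer.duration
    }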
  • 我的假名和头像
    2024-09-18

    I tried assembling the stream myself and got it playing: each incoming chunk is appended as it arrives. The format is WAV, so every chunk has to have the WAV header prepended before it can be played.


    Reference code:

    function combineHeaderAndChunk(header, chunk) {
      // Create a new ArrayBuffer to hold both the header and the chunk
      const combinedBuffer = new ArrayBuffer(header.byteLength + chunk.byteLength);
    
      // Create a Uint8Array view of the combined buffer
      const combinedView = new Uint8Array(combinedBuffer);
    
      // Copy the header into the combined buffer
      combinedView.set(new Uint8Array(header), 0);
    
      // Copy the chunk data after the header
      combinedView.set(new Uint8Array(chunk), header.byteLength);
    
      return combinedBuffer;
    }
    
    // Usage example
    let storedHeader = null;
    let isFirstChunk = true;
    
    ws.onmessage = function(event) {
      if (isFirstChunk) {
          // Assume the first 44 bytes are the header
          storedHeader = event.data.slice(0, 44);
          // const headerInfo = parseWavHeader(storedHeader);
          // console.log("WAV Header Info:", headerInfo);
    
          // Handle the rest of the first chunk as audio data
          const firstChunkData = event.data.slice(44);
          const combinedData = combineHeaderAndChunk(storedHeader, firstChunkData);
          processAudioData(combinedData);
    
          isFirstChunk = false;
      } else {
          // For subsequent chunks, combine with the stored header
          const combinedData = combineHeaderAndChunk(storedHeader, event.data);
          processAudioData(combinedData);
      }
    };
    
    function processAudioData(audioData) {
      // Send the header-prefixed chunk to the Web Audio API and play it
      // (audioContext: a Web Audio context created elsewhere, assumed)
      audioContext.decodeAudioData(audioData)
          .then(decodedData => {
              const source = audioContext.createBufferSource();
              source.buffer = decodedData;
              source.connect(audioContext.destination);
              source.start();
          })
          .catch(error => console.error("Error decoding audio data:", error));
    }
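
    The parseWavHeader helper referenced in the comments above isn't shown; a minimal version reading the canonical 44-byte PCM WAV header might look like this (field offsets per the WAV spec):

    function parseWavHeader(headerBuffer) {
      const view = new DataView(headerBuffer);
      return {
        audioFormat: view.getUint16(20, true),   // 1 = PCM
        numChannels: view.getUint16(22, true),
        sampleRate: view.getUint32(24, true),
        byteRate: view.getUint32(28, true),
        bitsPerSample: view.getUint16(34, true),
        dataSize: view.getUint32(40, true),      // size of the "data" chunk
      };
    }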
    
    
  • 巧克力张张包
    2024-06-06

    Did you ever solve this? Could you share your implementation?

  • undefined
    2024-05-22

    Did you solve it? Do you have any sample code you could share?

    const audioCtx = wx.createWebAudioContext();
    const source = audioCtx.createBufferSource();
    audioCtx.decodeAudioData(data,(buffer)=>{
      source.buffer = buffer;
      source.connect(audioCtx.destination);
      source.start();
    }) 
    


    I tried to dynamically append data to source.buffer afterwards, but it didn't work:

    source.buffer.copyToChannel(buffer.getChannelData(0), 0, seek);
    
    • 巧克力张张包
      2024-06-06
      Did you ever solve this? Could you share your implementation?