// Request headers for the Azure Cognitive Services text-to-speech REST call.
const headers = {
  'Ocp-Apim-Subscription-Key': subscriptionKey,
  'Content-Type': 'application/ssml+xml',
  // NOTE(review): the TTS REST endpoint normally lives at
  // {region}.tts.speech.microsoft.com — confirm this Host matches `endpoint`.
  'Host': 'eastus.api.cognitive.microsoft.com',
  // Ask the service for MP3 directly so the response bytes can be stored as a
  // playable .mp3 file. The previous value, 'riff-24khz-16bit-mono-pcm', is
  // RIFF/WAV data and does not play when saved with an .mp3 extension.
  'X-Microsoft-OutputFormat': 'audio-24khz-48kbitrate-mono-mp3',
  'User-Agent': 'banluyingyu'
};
// Request body: SSML describing the text to synthesize.
// A single-quoted JS string cannot span multiple lines, so the original was a
// SyntaxError — a template literal is used instead. The SSML namespace is
// http:// (not https://) per the W3C SSML recommendation.
let requestBody = `<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="en-US">
<voice name="en-US-AriaNeural">Hello, how are you today?Hello, how are you today?Hello, how are you today?</voice>
</speak>`;
// POST the SSML to the Azure TTS endpoint; the response body is the synthesized audio.
let voiceRes = await uniCloud.httpclient.request(endpoint, {
method: 'POST',
headers: headers,
data: requestBody
// responseType: 'arraybuffer'
// NOTE(review): without a binary responseType the returned data may not be a
// raw byte buffer on every runtime — confirm against the uniCloud httpclient
// docs if the saved file comes out corrupted.
})
// The returned audio: the raw bytes of the synthesized speech.
const audioStream = voiceRes.data;
请问一下,如何把这个 audioStream 保存成 mp3 文件?
// Upload the audio bytes to uniCloud cloud storage.
// NOTE(review): the file is saved as .mp3, but the request above asks for a
// riff-...-pcm (WAV) output format — that extension/content mismatch is the
// likely reason the uploaded file does not play; request an MP3 output format
// from the TTS service instead.
let fileRes = await uniCloud.uploadFile({
cloudPath: '123456789.mp3',
fileContent: audioStream
})
我直接用 uploadFile 保存确实生成了一个mp3文件,但是不能播放。
传上去的MP3不能播放,是不是因为输出格式不对?
'X-Microsoft-OutputFormat': 'riff-24khz-16bit-mono-pcm'把这个改成'X-Microsoft-OutputFormat': 'audio-24khz-48kbitrate-mono-mp3'试试
const ffmpeg = require('fluent-ffmpeg');
const fs = require('fs');
const { Readable } = require('stream');
// Raw audio previously downloaded from the TTS service.
const audioBuffer = fs.readFileSync('path/to/audio-24khz-48kbitrate-mono-mp3');
// fluent-ffmpeg's input() accepts a file path or a readable stream — NOT a bare
// Uint8Array as the original snippet passed — so wrap the buffer in a stream.
const audioInput = Readable.from(audioBuffer);
// Re-encode the audio as a standard 128 kbit/s MP3 and write it to disk.
ffmpeg()
  .input(audioInput)
  .outputOptions('-codec:a', 'libmp3lame')
  .outputOptions('-b:a', '128k')
  .format('mp3')
  .on('error', function(err) {
    console.log('An error occurred: ' + err.message);
  })
  .on('end', function() {
    console.log('Audio encoding complete');
  })
  .pipe(fs.createWriteStream('path/to/output.mp3'));
这里是 chatGPT 给出的解决方法,但这个我还是不理解,怎么上传到云存储?
没看懂,下载图片不也是返回buffer,转成图片会做吗
buffer转
buffer转base64发给我,我试试
数据已经返回成功。
const subscriptionKey = 'YOUR_SUBSCRIPTION_KEY'; const endpoint = 'https://YOUR_REGION.tts.speech.microsoft.com/cognitiveservices/v1'; // 设置请求头 const headers = { 'Authorization': 'Bearer ' + subscriptionKey, 'Content-Type': 'application/ssml+xml', 'X-Microsoft-OutputFormat': 'riff-16khz-16bit-mono-pcm' }; // 设置请求体 const requestBody = '<speak version="1.0" xmlns="https://www.w3.org/2001/10/synthesis" xml:lang="en-US"><voice name="en-US-AriaNeural"><prosody rate="default">Hello, how are you today?</prosody></voice></speak>'; exports.main = async (event, context) => { // 发送POST请求 const res = await uni.request({ url: endpoint, method: 'POST', data: requestBody, header: headers, responseType: 'arraybuffer' }); // 获取返回的音频流 const audioStream = res.data; // 将音频流转换为音频文件并下载 const blob = new Blob([audioStream], { type: "audio/wav" }); const url = URL.createObjectURL(blob); const link = document.createElement("a"); link.href = url; link.download = "speech.wav"; link.click(); }; 这个是我让 chatGPT 帮我写的,但后面的 将音频流转换为音频文件并下载 不是很明白。