在获取到API返回的请求参数后,可以通过Unity的AudioSource组件来播放音频。首先,你需要在GenshinTextToSpeech类中添加一个AudioSource的变量:

public AudioSource audioSource;

然后,在Speak方法中,在获取到音频后,将其赋值给AudioSource的clip属性,并调用Play方法播放音频:

if (speechRequest.responseCode == 200)
{
    AudioClip audioClip = DownloadHandlerAudioClip.GetContent(speechRequest);
    _callback(audioClip);

    // 播放音频
    audioSource.clip = audioClip;
    audioSource.Play();
}

最后,确保你在Unity编辑器中将GenshinTextToSpeech脚本组件的audioSource属性与一个AudioSource组件关联起来。

这样,当调用Speak方法时,它会获取到音频并使用AudioSource播放音频。完整的代码示例如下:

using System;
using System.Collections;
using System.Xml.Linq;
using UnityEngine;
using UnityEngine.Networking;

public class GenshinTextToSpeech : TTS
{

    #region 参数定义
    /// <summary>
    /// Character voice used for synthesis.
    /// </summary>
    [Header("朗读声音设置")]
    public string speaker = "胡桃";
    /// <summary>
    /// Audio format requested from the API.
    /// NOTE(review): the download handler below always decodes WAV — confirm the API
    /// honors/ignores other formats before changing this default.
    /// </summary>
    [Header("音频格式设置")]
    public string format = "wav";
    /// <summary>
    /// Length (speed) scale of the generated audio.
    /// </summary>
    [Header("音频长度设置")]
    public float length = 1f;
    /// <summary>
    /// Noise scale passed to the API.
    /// </summary>
    [Header("噪声设置")]
    public float noise = 0.5f;
    /// <summary>
    /// Noise-width (noisew) scale passed to the API.
    /// </summary>
    [Header("噪声权重设置")]
    public float noisew = 0.9f;
    /// <summary>
    /// SDP ratio passed to the API.
    /// </summary>
    [Header("声调比例设置")]
    public float sdp_ratio = 0.2f;

    /// <summary>
    /// AudioSource used to play the synthesized clip. Assign it in the inspector;
    /// if left unassigned, playback is skipped with a warning instead of throwing.
    /// </summary>
    [Header("音频播放设置")]
    public AudioSource audioSource;

    #endregion

    private void Awake()
    {
        // Endpoint of the genshinvoice.top TTS API (declared on the TTS base class).
        m_PostURL = "https://genshinvoice.top/api"; 
    }

    /// <summary>
    /// Synthesizes speech for the given text and invokes the callback with the clip.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Receives the decoded AudioClip on success.</param>
    public override void Speak(string _msg, Action<AudioClip> _callback)
    {
        // Adapt to the shared coroutine, which reports (clip, text).
        StartCoroutine(GetVoice(_msg, (clip, msg) => _callback(clip)));
    }

    /// <summary>
    /// Synthesizes speech and invokes the callback with the clip and the original text.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Receives the decoded AudioClip and the source text on success.</param>
    public override void Speak(string _msg, Action<AudioClip, string> _callback)
    {
        StartCoroutine(GetVoice(_msg, _callback));
    }

    /// <summary>
    /// Shared REST coroutine: sends the synthesis request, plays the resulting clip on
    /// <see cref="audioSource"/>, and reports it to the callback. The callback is only
    /// invoked on HTTP 200; failures are logged.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Receives the decoded AudioClip and the source text.</param>
    /// <returns>Coroutine enumerator.</returns>
    private IEnumerator GetVoice(string _msg, Action<AudioClip, string> _callback)
    {
        stopwatch.Restart();
        // Build the request payload (the URL with URL-encoded query parameters).
        string textToSpeechRequestBody = GenerateTextToSpeech(speaker, _msg, format, length, noise, noisew, sdp_ratio);

        using (UnityWebRequest speechRequest = new UnityWebRequest(m_PostURL, "POST"))
        {
            byte[] data = System.Text.Encoding.UTF8.GetBytes(textToSpeechRequestBody);
            speechRequest.uploadHandler = new UploadHandlerRaw(data);
            // NOTE(review): always decodes WAV even though `format` is user-configurable — confirm.
            speechRequest.downloadHandler = new DownloadHandlerAudioClip(speechRequest.uri, AudioType.WAV);
            Debug.Log("传参的值是:" + textToSpeechRequestBody);

            yield return speechRequest.SendWebRequest();

            if (speechRequest.responseCode == 200)
            {
                AudioClip audioClip = DownloadHandlerAudioClip.GetContent(speechRequest);
                _callback(audioClip, _msg);
                PlayClip(audioClip);
            }
            else
            {
                Debug.LogError("语音合成失败: " + speechRequest.error);
            }
        }

        stopwatch.Stop();
        Debug.Log("Genshin语音合成耗时:" + stopwatch.Elapsed.TotalSeconds);
    }

    /// <summary>
    /// Plays the clip on the configured AudioSource. Guards against a missing
    /// inspector assignment instead of throwing a NullReferenceException.
    /// </summary>
    /// <param name="clip">Clip to play.</param>
    private void PlayClip(AudioClip clip)
    {
        if (audioSource == null)
        {
            Debug.LogWarning("未设置AudioSource,无法播放音频。");
            return;
        }
        audioSource.clip = clip;
        audioSource.Play();
    }

    /// <summary>
    /// Builds the full request URL with URL-encoded query parameters.
    /// Encoding is required: text containing '&amp;', '=', spaces or non-ASCII characters
    /// (including the default speaker "胡桃") would otherwise corrupt the query string.
    /// Floats are formatted with the invariant culture so locales that use ',' as the
    /// decimal separator do not break the API.
    /// </summary>
    /// <param name="speaker">Voice/character name.</param>
    /// <param name="msg">Text to synthesize.</param>
    /// <param name="format">Requested audio format.</param>
    /// <param name="length">Length (speed) scale.</param>
    /// <param name="noise">Noise scale.</param>
    /// <param name="noisew">Noise-width scale.</param>
    /// <param name="sdp_ratio">SDP ratio.</param>
    /// <returns>The endpoint URL with the encoded query string appended.</returns>
    public string GenerateTextToSpeech(string speaker, string msg, string format, float length, float noise, float noisew, float sdp_ratio)
    {
        var inv = System.Globalization.CultureInfo.InvariantCulture;
        string urlParams = string.Format(
            inv,
            "?speaker={0}&text={1}&format={2}&length={3}&noise={4}&noisew={5}&sdp_ratio={6}",
            UnityWebRequest.EscapeURL(speaker),
            UnityWebRequest.EscapeURL(msg),
            UnityWebRequest.EscapeURL(format),
            length, noise, noisew, sdp_ratio);

        return m_PostURL + urlParams;
    }
}

请注意,这里的代码假设你已经在Unity中正确设置了AudioSource组件,并将其与GenshinTextToSpeech脚本组件的audioSource属性关联起来。

原始提问:如何在获取到 API 返回的数据后,在下面的代码中增加 Unity 播放音频的方法?下面的代码只会获取到返回的页面信息(HTML),并没有获取到音频文件进行播放,请给出完整的示例代码。(提问中附带的返回页面 HTML 片段在抓取时被截断,此处略去。)

原文地址: https://www.cveoy.top/t/topic/i9JP 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录