以下是修改后的完整代码示例:

using System;
using System.Collections;
using System.Xml.Linq;
using UnityEngine;
using UnityEngine.Networking;

public class GenshinTextToSpeech : TTS
{

    #region Parameter definitions
    /// <summary>
    /// Character voice used for synthesis.
    /// </summary>
    [Header("朗读声音设置")]
    public string speaker = "胡桃";
    /// <summary>
    /// Audio format requested from the API ("wav", "mp3", "ogg").
    /// </summary>
    [Header("音频格式设置")]
    public string format = "wav";
    /// <summary>
    /// Audio length (speed) scale factor.
    /// </summary>
    [Header("音频长度设置")]
    public float length = 1f;
    /// <summary>
    /// Noise scale.
    /// </summary>
    [Header("噪声设置")]
    public float noise = 0.5f;
    /// <summary>
    /// Noise width (noise-w) scale.
    /// </summary>
    [Header("噪声权重设置")]
    public float noisew = 0.9f;
    /// <summary>
    /// SDP ratio (pitch/prosody mix).
    /// </summary>
    [Header("声调比例设置")]
    public float sdp_ratio = 0.2f;

    #endregion

    private void Awake()
    {
        // Base endpoint; query parameters are appended per request.
        m_PostURL = "https://genshinvoice.top/api";
    }

    /// <summary>
    /// Synthesize speech and return the resulting clip via callback.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Invoked with the decoded AudioClip on success.</param>
    public override void Speak(string _msg, Action<AudioClip> _callback)
    {
        // Delegate to the two-argument overload; discard the echoed text.
        StartCoroutine(GetVoice(_msg, (clip, _) => _callback(clip)));
    }

    /// <summary>
    /// Synthesize speech and return both the clip and the original text.
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Invoked with the decoded AudioClip and the source text on success.</param>
    public override void Speak(string _msg, Action<AudioClip, string> _callback)
    {
        StartCoroutine(GetVoice(_msg, _callback));
    }

    /// <summary>
    /// REST speech synthesis. Sends the request with all parameters in the
    /// URL query string (the API reads parameters from the query string, so
    /// the composed URL itself must be the request target — previously the
    /// URL was only uploaded as the request body, so the server never saw
    /// the parameters).
    /// </summary>
    /// <param name="_msg">Text to synthesize.</param>
    /// <param name="_callback">Invoked with the clip and source text on HTTP 200; not invoked on failure.</param>
    /// <returns>Coroutine enumerator.</returns>
    private IEnumerator GetVoice(string _msg, Action<AudioClip, string> _callback)
    {
        stopwatch.Restart();
        // Full request URL including URL-encoded query parameters.
        string requestUrl = GenerateTextToSpeech(speaker, _msg, format, length, noise, noisew, sdp_ratio);

        using (UnityWebRequest speechRequest = new UnityWebRequest(requestUrl, "POST"))
        {
            // Decode the response directly into an AudioClip, honoring the
            // configured format instead of hard-coding WAV.
            speechRequest.downloadHandler = new DownloadHandlerAudioClip(speechRequest.uri, GetAudioType(format));
            Debug.Log("传参的值是:" + requestUrl);
            yield return speechRequest.SendWebRequest();

            if (speechRequest.responseCode == 200)
            {
                AudioClip audioClip = DownloadHandlerAudioClip.GetContent(speechRequest);
                _callback(audioClip, _msg);
                PlayAudioClip(audioClip); // Play the synthesized audio
            }
            else
            {
                Debug.LogError("语音合成失败: " + speechRequest.error);
            }
        }

        stopwatch.Stop();
        Debug.Log("Genshin语音合成耗时:" + stopwatch.Elapsed.TotalSeconds);
    }

    /// <summary>
    /// Build the full request URL with URL-encoded query parameters.
    /// (Parameters such as Chinese text and spaces must be percent-encoded,
    /// otherwise the URL is malformed.)
    /// </summary>
    /// <param name="speaker">Character voice name.</param>
    /// <param name="msg">Text to synthesize.</param>
    /// <param name="format">Audio format.</param>
    /// <param name="length">Length scale.</param>
    /// <param name="noise">Noise scale.</param>
    /// <param name="noisew">Noise width.</param>
    /// <param name="sdp_ratio">SDP ratio.</param>
    /// <returns>Complete URL: base endpoint + encoded query string.</returns>
    public string GenerateTextToSpeech(string speaker, string msg, string format, float length, float noise, float noisew, float sdp_ratio)
    {
        string urlParams = string.Format(
            "?speaker={0}&text={1}&format={2}&length={3}&noise={4}&noisew={5}&sdp_ratio={6}",
            Uri.EscapeDataString(speaker ?? string.Empty),
            Uri.EscapeDataString(msg ?? string.Empty),
            Uri.EscapeDataString(format ?? string.Empty),
            length, noise, noisew, sdp_ratio);

        return m_PostURL + urlParams;
    }

    /// <summary>
    /// Map the configured format string to Unity's AudioType.
    /// Unknown or null formats fall back to WAV (the previous hard-coded behavior).
    /// </summary>
    /// <param name="fmt">Format string, e.g. "wav", "mp3", "ogg".</param>
    private static AudioType GetAudioType(string fmt)
    {
        switch ((fmt ?? string.Empty).ToLowerInvariant())
        {
            case "mp3": return AudioType.MPEG;
            case "ogg": return AudioType.OGGVORBIS;
            default: return AudioType.WAV;
        }
    }

    /// <summary>
    /// Play the clip on this GameObject, adding an AudioSource if needed.
    /// </summary>
    /// <param name="audioClip">Clip to play.</param>
    private void PlayAudioClip(AudioClip audioClip)
    {
        AudioSource audioSource = GetComponent<AudioSource>();
        if (audioSource == null)
        {
            audioSource = gameObject.AddComponent<AudioSource>();
        }

        audioSource.clip = audioClip;
        audioSource.Play();
    }
}

在上述代码中,我添加了一个PlayAudioClip方法,用于播放获取到的音频文件。该方法使用Unity的AudioSource组件来播放音频。在合成成功后,会调用PlayAudioClip方法来播放音频。

Unity中使用Genshin语音合成API解析并播放wav音频

原文地址: https://www.cveoy.top/t/topic/jBPC 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录