# NOI_3_ZIP / app.py
import spaces
import os
from huggingface_hub import login
import gradio as gr
from cached_path import cached_path
import tempfile
import numpy as np
from vinorm import TTSnorm
# generate_sentence and vocoder are used below; assuming they are exported by
# infer_zipvoice alongside the other model objects.
from infer_zipvoice import model, vocoder, tokenizer, feature_extractor, device, generate_sentence
from utils import preprocess_ref_audio_text, save_spectrogram, chunk_text
# Retrieve token from secrets
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Log in to Hugging Face
if hf_token:
    login(token=hf_token)

def post_process(text):
    """Collapse duplicated punctuation, strip double quotes, and normalize whitespace."""
    # Pad once so the space-delimited patterns can match at the string boundaries;
    # each replacement keeps its surrounding spaces, so no re-padding is needed.
    text = " " + text + " "
    text = text.replace(" . . ", " . ")
    text = text.replace(" .. ", " . ")
    text = text.replace(" , , ", " , ")
    text = text.replace(" ,, ", " , ")
    text = text.replace('"', "")
    return " ".join(text.split())

@spaces.GPU
def infer_tts(ref_audio_orig: str, gen_text: str, speed: float = 1.0, request: gr.Request = None):
    if not ref_audio_orig:
        raise gr.Error("Please upload a sample audio file.")
    if not gen_text.strip():
        raise gr.Error("Please enter the text content to generate voice.")
    if len(gen_text.split()) > 1000:
        raise gr.Error("Please enter text content with fewer than 1000 words.")
    try:
        # Split long input into chunks and synthesize each one separately.
        gen_texts = chunk_text(gen_text)
        final_wave_total = None
        # Prepare (and transcribe) the reference audio once; it is identical for every chunk.
        ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_orig, "")
        for i, chunk in enumerate(gen_texts):
            final_wave = generate_sentence(
                ref_text.lower(),
                ref_audio,
                post_process(TTSnorm(chunk)).lower(),
                model=model,
                vocoder=vocoder,
                tokenizer=tokenizer,
                feature_extractor=feature_extractor,
                device=device,
                speed=speed,
            )
            if i == 0:
                final_wave_total = final_wave
            else:
                final_wave_total = np.concatenate((final_wave_total, final_wave), axis=0)
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram:
            spectrogram_path = tmp_spectrogram.name
            save_spectrogram(final_wave_total, spectrogram_path)
        # Assumed 24 kHz output rate (ZipVoice's vocoder); adjust if the model differs.
        final_sample_rate = 24000
        return (final_sample_rate, final_wave_total), spectrogram_path
    except Exception as e:
        raise gr.Error(f"Error generating voice: {e}")

# Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎀 ZipVoice: Vietnamese Text-to-Speech Synthesis
    The model was trained on approximately 150 hours of data using an RTX 3090 GPU.

    Enter text and upload a sample voice to generate natural speech.
    """)

    with gr.Row():
        ref_audio = gr.Audio(label="🔊 Sample Voice", type="filepath")
        gen_text = gr.Textbox(label="📝 Text", placeholder="Enter the text to generate voice...", lines=3)

    speed = gr.Slider(0.3, 2.0, value=1.0, step=0.1, label="⚡ Speed")
    btn_synthesize = gr.Button("🔥 Generate Voice")

    with gr.Row():
        output_audio = gr.Audio(label="🎧 Generated Audio", type="numpy")
        output_spectrogram = gr.Image(label="📊 Spectrogram")

    model_limitations = gr.Textbox(
        value="""1. The model may not handle numbers, dates, special characters, etc. well => a text normalization module is needed.
2. The rhythm of some generated audio may be inconsistent or choppy => choose clearly pronounced sample audio with minimal pauses for better synthesis quality.
3. By default, the reference audio is transcribed with the pho-whisper-medium model, which may not always recognize Vietnamese accurately, resulting in poor synthesis quality.
4. Inference on overly long paragraphs may produce poor results.""",
        label="❗ Model Limitations",
        lines=4,
        interactive=False,
    )

    btn_synthesize.click(infer_tts, inputs=[ref_audio, gen_text, speed], outputs=[output_audio, output_spectrogram])
# Launch the app; pass share=True to launch() for a public gradio.live link when running outside Spaces.
demo.queue().launch()