Update app.py
app.py CHANGED
@@ -14,25 +14,59 @@ def _grab_best_device(use_gpu=True):
 
 device = _grab_best_device()
 
-
+default_model_per_language = {
+    "english": "kakao-enterprise/vits-ljs",
+    "spanish": "facebook/mms-tts-spa",
+    "tamil": "facebook/mms-tts-tam"
+}
+
+models_per_language = {
+    "english": [
+        "ylacombe/vits_ljs_irish_male_monospeaker",
+        "ylacombe/vits_ljs_welsh_female_monospeaker",
+        "ylacombe/vits_ljs_welsh_male_monospeaker",
+        "ylacombe/vits_ljs_welsh_female_monospeaker_2",
+        "ylacombe/vits_ljs_welsh_male_monospeaker_2",
+        "ylacombe/vits_ljs_welsh_female_2",
+        "ylacombe/vits_ljs_irish_male_2",
+        "ylacombe/vits_ljs_scottish_female_2",
+    ],
+    "spanish": [
+        "ylacombe/mms-spa-finetuned-monospeaker-all",
+        "ylacombe/mms-spa-finetuned-monospeaker",
+    ],
+    "tamil": [
+        "ylacombe/mms-tam-finetuned-monospeaker-all",
+        "ylacombe/mms-tam-finetuned-monospeaker",
+    ]
+}
+
+HUB_PATH = "ylacombe/vits_ljs_welsh_female_monospeaker"
 
 
 pipe_dict = {
-    "current_model": "ylacombe/
+    "current_model": "ylacombe/vits_ljs_welsh_female_monospeaker",
     "pipe": pipeline("text-to-speech", model=HUB_PATH, device=0),
+    "original_pipe": pipeline("text-to-speech", model=default_model_per_language["english"], device=0),
+    "language": "english",
 }
 
 title = "# 🐶 VITS"
 
+max_speakers = 15
+
 description = """
 
 """
 
-max_speakers = 15
-
 
 # Inference
-def generate_audio(text, model_id):
+def generate_audio(text, model_id, language):
+
+    if pipe_dict["language"] != language:
+        gr.Warning(f"Language has changed - loading new default model: {default_model_per_language[language]}")
+        pipe_dict["language"] = language
+        pipe_dict["original_pipe"] = pipeline("text-to-speech", model=default_model_per_language[language], device=0)
 
     if pipe_dict["current_model"] != model_id:
         gr.Warning("Model has changed - loading new model")
@@ -42,8 +76,15 @@ def generate_audio(text, model_id):
     num_speakers = pipe_dict["pipe"].model.config.num_speakers
 
     out = []
+    # first generate original model result
+    output = pipe_dict["original_pipe"](text)
+    output = gr.Audio(value = (output["sampling_rate"], output["audio"].squeeze()), type="numpy", autoplay=False, label=f"Non finetuned model prediction {default_model_per_language[language]}", show_label=True,
+                      visible=True)
+    out.append(output)
+
+
     if num_speakers>1:
-        for i in range(min(num_speakers, max_speakers)):
+        for i in range(min(num_speakers, max_speakers - 1)):
             forward_params = {"speaker_id": i}
             output = pipe_dict["pipe"](text, forward_params=forward_params)
 
@@ -56,7 +97,7 @@ def generate_audio(text, model_id):
         output = gr.Audio(value = (output["sampling_rate"], output["audio"].squeeze()), type="numpy", autoplay=False, label="Generated Audio - Mono speaker", show_label=True,
                           visible=True)
         out.append(output)
-    out.extend([gr.Audio(visible=False)]*(max_speakers-1))
+    out.extend([gr.Audio(visible=False)]*(max_speakers-2))
     return out
 
 
@@ -66,25 +107,18 @@ with gr.Blocks() as demo_blocks:
     gr.Markdown(description)
     with gr.Row():
         with gr.Column():
-            inp_text = gr.Textbox(label="Input Text", info="What would you like
+            inp_text = gr.Textbox(label="Input Text", info="What would you like VITS to synthesise?")
             btn = gr.Button("Generate Audio!")
+            language = gr.Dropdown(
+                default_model_per_language.keys(),
+                value = "english",
+                label = "language",
+                info = "Language that you want to test"
+            )
+
             model_id = gr.Dropdown(
-                [
-
-                "ylacombe/vits_ljs_welsh_female_monospeaker",
-                "ylacombe/vits_ljs_welsh_male_monospeaker",
-                "ylacombe/vits_ljs_welsh_female_monospeaker_2",
-                "ylacombe/vits_ljs_welsh_male_monospeaker_2",
-                "kakao-enterprise/vits-ljs",
-                "ylacombe/vits_ljs_welsh_female_2",
-                "ylacombe/vits_ljs_irish_male_2",
-                "ylacombe/vits_ljs_scottish_female_2",
-                "ylacombe/mms-tam-finetuned-monospeaker-all",
-                "ylacombe/mms-spa-finetuned-chilean-monospeaker-all",
-                "ylacombe/mms-tam-finetuned-monospeaker",
-                "ylacombe/mms-spa-finetuned-chilean-monospeaker"
-                ],
-                value="ylacombe/vits_ljs_welsh_male_2",
+                models_per_language["english"],
+                value="ylacombe/vits_ljs_welsh_female_monospeaker_2",
                 label="Model",
                 info="Model you want to test",
             )
@@ -94,8 +128,18 @@ with gr.Blocks() as demo_blocks:
     for i in range(max_speakers):
         out_audio = gr.Audio(type="numpy", autoplay=False, label=f"Generated Audio - speaker {i}", show_label=True, visible=False)
         outputs.append(out_audio)
+
+    language.change(lambda language: gr.Dropdown(
+            models_per_language[language],
+            value=models_per_language[language][0],
+            label="Model",
+            info="Model you want to test",
+        ),
+        language,
+        model_id
+    )
 
-    btn.click(generate_audio, [inp_text, model_id], outputs)
+    btn.click(generate_audio, [inp_text, model_id, language], outputs)
 
 
 demo_blocks.queue().launch()
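
The language.change wiring added above repopulates the model dropdown whenever the language selection changes. Below is a minimal standalone sketch of that pattern, assuming Gradio 4.x semantics (returning a gr.Dropdown(...) from an event handler updates the target component in place); the model names here are placeholders, not the Space's real checkpoints:

import gradio as gr

# Placeholder per-language model lists (hypothetical names).
models_per_language = {
    "english": ["model-en-a", "model-en-b"],
    "spanish": ["model-es-a"],
}

with gr.Blocks() as demo:
    language = gr.Dropdown(list(models_per_language), value="english", label="language")
    model_id = gr.Dropdown(models_per_language["english"], label="Model")

    # Returning a new gr.Dropdown from the callback replaces the existing
    # component's choices and selected value whenever `language` changes.
    language.change(
        lambda lang: gr.Dropdown(
            choices=models_per_language[lang],
            value=models_per_language[lang][0],
        ),
        language,
        model_id,
    )

demo.launch()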
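
generate_audio builds its gr.Audio components directly from the output of the transformers text-to-speech pipeline. A minimal sketch of that contract, using the Space's default English checkpoint (the pipeline returns a dict holding a waveform array and a sampling rate, so the array is squeezed to 1-D before handing it to gr.Audio):

import gradio as gr
from transformers import pipeline

pipe = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

out = pipe("Hello world")
# out["audio"] is a numpy waveform (VITS emits a leading batch dimension,
# hence the squeeze) and out["sampling_rate"] is an int; gr.Audio with
# type="numpy" expects a (sampling_rate, 1-D waveform) tuple.
audio = gr.Audio(value=(out["sampling_rate"], out["audio"].squeeze()), type="numpy")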