init
- app.py +185 -0
- packages.txt +2 -0
- requirements.txt +1 -0
app.py
ADDED
@@ -0,0 +1,185 @@
import spaces
from kokoro import KModel, KPipeline
import gradio as gr
import os
import random
import torch
from urllib.parse import quote

# Build the bundled frontend once at startup.
print(os.system("""
cd front;
npm ci;
npm run build;
cd ..;
"""))

CHAR_LIMIT = 5000  # maximum input characters accepted per request

CUDA_AVAILABLE = torch.cuda.is_available()
# One model per device: models[False] on CPU, models[True] on GPU when available.
models = {gpu: KModel().to('cuda' if gpu else 'cpu').eval() for gpu in [False] + ([True] if CUDA_AVAILABLE else [])}
# 'a' = American English, 'b' = British English; model=False keeps the pipelines g2p-only.
pipelines = {lang_code: KPipeline(lang_code=lang_code, model=False) for lang_code in 'ab'}
pipelines['a'].g2p.lexicon.golds['kokoro'] = 'kˈOkəɹO'
pipelines['b'].g2p.lexicon.golds['kokoro'] = 'kˈQkəɹQ'
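# @spaces.GPU below runs the decorated function on ZeroGPU hardware;
# duration=30 caps each GPU allocation at 30 seconds per call.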
@spaces.GPU(duration=30)
def forward_gpu(ps, ref_s, speed):
    return models[True](ps, ref_s, speed)

def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
    text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
    pipeline = pipelines[voice[0]]
    pack = pipeline.load_voice(voice)
    use_gpu = use_gpu and CUDA_AVAILABLE
    for _, ps, _ in pipeline(text, voice, speed):
        ref_s = pack[len(ps)-1]
        try:
            if use_gpu:
                audio = forward_gpu(ps, ref_s, speed)
            else:
                audio = models[False](ps, ref_s, speed)
        except gr.exceptions.Error as e:
            if use_gpu:
                gr.Warning(str(e))
                gr.Info('Retrying with CPU. To avoid this error, change Hardware to CPU.')
                audio = models[False](ps, ref_s, speed)
            else:
                raise gr.Error(str(e))
        # Only the first segment is synthesized; return its audio plus tokens.
        return (24000, audio.numpy()), ps
    return None, ''
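# The helpers below back the HTTP API: predict() is a CPU-only one-shot
# entry point, tokenize_first() exposes g2p tokenization without synthesis,
# and generate_all() streams audio segment by segment.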
# Arena API
def predict(text, voice='af_heart', speed=1):
    return generate_first(text, voice, speed, use_gpu=False)[0]

def tokenize_first(text, voice='af_heart'):
    pipeline = pipelines[voice[0]]
    for _, ps, _ in pipeline(text, voice):
        return ps
    return ''

def generate_all(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
    text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
    pipeline = pipelines[voice[0]]
    pack = pipeline.load_voice(voice)
    use_gpu = use_gpu and CUDA_AVAILABLE
    first = True
    for _, ps, _ in pipeline(text, voice, speed):
        ref_s = pack[len(ps)-1]
        try:
            if use_gpu:
                audio = forward_gpu(ps, ref_s, speed)
            else:
                audio = models[False](ps, ref_s, speed)
        except gr.exceptions.Error as e:
            if use_gpu:
                gr.Warning(str(e))
                gr.Info('Switching to CPU')
                audio = models[False](ps, ref_s, speed)
            else:
                raise gr.Error(str(e))
        yield 24000, audio.numpy()
        if first:
            first = False
            # A near-silent filler chunk after the first segment; appears to
            # work around the Gradio streaming bug noted in STREAM_NOTE below.
            yield 24000, torch.zeros(1).numpy()
CHOICES = {
    '🇺🇸 🚺 Heart ❤️': 'af_heart',
    '🇺🇸 🚺 Bella 🔥': 'af_bella',
    '🇺🇸 🚺 Nicole 🎧': 'af_nicole',
    '🇺🇸 🚺 Aoede': 'af_aoede',
    '🇺🇸 🚺 Kore': 'af_kore',
    '🇺🇸 🚺 Sarah': 'af_sarah',
    '🇺🇸 🚺 Nova': 'af_nova',
    '🇺🇸 🚺 Sky': 'af_sky',
    '🇺🇸 🚺 Alloy': 'af_alloy',
    '🇺🇸 🚺 Jessica': 'af_jessica',
    '🇺🇸 🚺 River': 'af_river',
    '🇺🇸 🚹 Michael': 'am_michael',
    '🇺🇸 🚹 Fenrir': 'am_fenrir',
    '🇺🇸 🚹 Puck': 'am_puck',
    '🇺🇸 🚹 Echo': 'am_echo',
    '🇺🇸 🚹 Eric': 'am_eric',
    '🇺🇸 🚹 Liam': 'am_liam',
    '🇺🇸 🚹 Onyx': 'am_onyx',
    '🇺🇸 🚹 Santa': 'am_santa',
    '🇺🇸 🚹 Adam': 'am_adam',
    '🇬🇧 🚺 Emma': 'bf_emma',
    '🇬🇧 🚺 Isabella': 'bf_isabella',
    '🇬🇧 🚺 Alice': 'bf_alice',
    '🇬🇧 🚺 Lily': 'bf_lily',
    '🇬🇧 🚹 George': 'bm_george',
    '🇬🇧 🚹 Fable': 'bm_fable',
    '🇬🇧 🚹 Lewis': 'bm_lewis',
    '🇬🇧 🚹 Daniel': 'bm_daniel',
}
for v in CHOICES.values():
    pipelines[v[0]].load_voice(v)
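# Voice IDs encode language and gender: the first letter ('a' American,
# 'b' British English) selects the pipeline above; the second is
# 'f' female / 'm' male. Preloading avoids a download on first use.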
TOKEN_NOTE = '''
💡 Customize pronunciation with Markdown link syntax and /slashes/ like `[Kokoro](/kˈOkəɹO/)`

💬 To adjust intonation, try punctuation `;:,.!?—…"()“”` or stress `ˈ` and `ˌ`

⬇️ Lower stress `[1 level](-1)` or `[2 levels](-2)`

⬆️ Raise stress 1 level `[or](+2)` 2 levels (only works on less stressed, usually short words)
'''
with gr.Blocks() as generate_tab:
    out_audio = gr.Audio(label='Output Audio', interactive=False, streaming=False, autoplay=True)
    generate_btn = gr.Button('Generate', variant='primary')
    with gr.Accordion('Output Tokens', open=True):
        out_ps = gr.Textbox(interactive=False, show_label=False, info='Tokens used to generate the audio, up to 510 context length.')
        tokenize_btn = gr.Button('Tokenize', variant='secondary')
        gr.Markdown(TOKEN_NOTE)
        predict_btn = gr.Button('Predict', variant='secondary', visible=False)
STREAM_NOTE = ['⚠️ There is an unknown Gradio bug that might yield no audio the first time you click `Stream`.']
if CHAR_LIMIT is not None:
    STREAM_NOTE.append(f'✂️ Each stream is capped at {CHAR_LIMIT} characters.')
    STREAM_NOTE.append('🚀 Want more characters? You can [use Kokoro directly](https://huggingface.co/hexgrad/Kokoro-82M#usage) or duplicate this space:')
STREAM_NOTE = '\n\n'.join(STREAM_NOTE)

with gr.Blocks() as stream_tab:
    out_stream = gr.Audio(label='Output Audio Stream', interactive=False, streaming=True, autoplay=True)
    with gr.Row():
        stream_btn = gr.Button('Stream', variant='primary')
        stop_btn = gr.Button('Stop', variant='stop')
    with gr.Accordion('Note', open=True):
        gr.Markdown(STREAM_NOTE)
        gr.DuplicateButton()
API_NAME = 'tts'

head = f'''
<script>
document.addEventListener('DOMContentLoaded', () => {{
    console.log('DOM content loaded');
    if (!localStorage.getItem('debug') && !window.location.href.match(/debug=1/)) {{
        console.log('Attaching frontend app');
        const frontendApp = document.createElement('div');
        frontendApp.style = 'position: fixed; top: 0; left: 0; width: 100%; height: 100%; border: none; z-index: 999999; background: #333; color: white; font-size: 1.2em; padding: 20px; text-align: center;';
        frontendApp.innerHTML = "<br/><br/><br/>This app is used as backend for kokoro-podcast-generator; do not use it directly.";
        document.body.appendChild(frontendApp);
    }}
}});
</script>
'''
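# The <head> script above hides the UI behind an overlay unless localStorage
# 'debug' is set or the URL contains debug=1, since this Space is meant to be
# called as a backend rather than browsed directly.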
with gr.Blocks(head=head) as app:
    with gr.Row():
        with gr.Column():
            text = gr.Textbox(label='Input Text', info=f"Up to ~500 characters per Generate, or {'∞' if CHAR_LIMIT is None else CHAR_LIMIT} characters per Stream")
            voice = gr.Dropdown(list(CHOICES.items()), value='af_heart', label='Voice', info='Quality and availability vary by language')
            speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='Speed')
        with gr.Column():
            gr.TabbedInterface([generate_tab, stream_tab], ['Generate', 'Stream'])
    # The same api_name is reused below; Gradio warns on the clash and
    # typically registers later endpoints under suffixed names (e.g. tts_1).
    generate_btn.click(fn=generate_first, inputs=[text, voice, speed], outputs=[out_audio, out_ps], api_name=API_NAME)
    tokenize_btn.click(fn=tokenize_first, inputs=[text, voice], outputs=[out_ps], api_name=API_NAME)
    stream_event = stream_btn.click(fn=generate_all, inputs=[text, voice, speed], outputs=[out_stream], api_name=API_NAME)
    stop_btn.click(fn=None, cancels=stream_event)
    predict_btn.click(fn=predict, inputs=[text, voice, speed], outputs=[out_audio], api_name=API_NAME)

if __name__ == '__main__':
    app.queue(api_open=True).launch(show_api=True, ssr_mode=True)
packages.txt
ADDED
@@ -0,0 +1,2 @@
espeak-ng
nodejs
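(espeak-ng backs Kokoro's grapheme-to-phoneme fallback for out-of-dictionary words; nodejs provides the npm toolchain for the frontend build at the top of app.py.)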
requirements.txt
ADDED
@@ -0,0 +1 @@
kokoro>=0.7.16
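A minimal sketch of calling the `tts` endpoint from Python with the gradio_client package, assuming a hypothetical Space ID user/kokoro-space (substitute the real one); the audio output arrives as a file path:

from gradio_client import Client

client = Client('user/kokoro-space')  # hypothetical Space ID
# /tts maps to generate_first: inputs are (text, voice, speed);
# outputs are the generated audio and the phoneme token string.
audio, tokens = client.predict(
    'Hello from Kokoro!',
    'af_heart',
    1,
    api_name='/tts',
)
print(audio, tokens)

Because the same api_name is reused for several events in app.py, Gradio may expose the later ones under suffixed names such as /tts_1; check the Space's auto-generated API docs for the exact endpoint names.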