import os

import gradio as gr
import numpy as np

from audio_pipe import SpeechToSpeechPipeline
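
# `audio_pipe` is a local module in this Space whose source is not shown
# here. Judging from how it is used below, it presumably wraps fairseq's
# speech-to-speech checkpoints behind an interface roughly like the
# following hypothetical sketch (names and docstrings are illustrative
# assumptions, not the actual module):
#
#     class SpeechToSpeechPipeline:
#         def __init__(self, model_id: str):
#             """Download `model_id` from the Hugging Face Hub and load the
#             fairseq translation model together with its unit vocoder."""
#
#         def __call__(self, audio_path: str) -> "gr.Audio":
#             """Translate the recording at `audio_path` and return a
#             gr.Audio component whose config holds the output file."""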
# Hosted-inference-API variant, kept commented out:
# io1 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_en-hk", api_key=os.environ['api_key'])
# io2 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_hk-en", api_key=os.environ['api_key'])
# io3 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_en-hk", api_key=os.environ['api_key'])
# io4 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_hk-en", api_key=os.environ['api_key'])

# Load all four fairseq speech-to-speech pipelines once at startup.
pipe1 = SpeechToSpeechPipeline("facebook/xm_transformer_s2ut_en-hk")
pipe2 = SpeechToSpeechPipeline("facebook/xm_transformer_s2ut_hk-en")
pipe3 = SpeechToSpeechPipeline("facebook/xm_transformer_unity_en-hk")
pipe4 = SpeechToSpeechPipeline("facebook/xm_transformer_unity_hk-en")

def inference(audio, model):
    # Route the recording to the pipeline that matches the dropdown choice.
    if model == "xm_transformer_s2ut_en-hk":
        out = pipe1(audio)
    elif model == "xm_transformer_s2ut_hk-en":
        out = pipe2(audio)
    elif model == "xm_transformer_unity_en-hk":
        out = pipe3(audio)
    else:
        out = pipe4(audio)
    # Each pipeline returns a gr.Audio component; pull the synthesized
    # waveform's file path out of its config to feed the output component.
    return out.get_config()["value"]["name"]
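
# A behavior-equivalent alternative: dispatch through a dict keyed by the
# dropdown value, which stays compact if more language pairs are added
# (sketch only; `PIPES` is a hypothetical name, not part of the app above):
#
#     PIPES = {
#         "xm_transformer_s2ut_en-hk": pipe1,
#         "xm_transformer_s2ut_hk-en": pipe2,
#         "xm_transformer_unity_en-hk": pipe3,
#         "xm_transformer_unity_hk-en": pipe4,
#     }
#
#     def inference(audio, model):
#         return PIPES[model](audio).get_config()["value"]["name"]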
| css = """ | |
| .gradio-container { | |
| font-family: 'IBM Plex Sans', sans-serif; | |
| } | |
| .gr-button { | |
| color: black; | |
| border-color: grey; | |
| background: white; | |
| } | |
| input[type='range'] { | |
| accent-color: black; | |
| } | |
| .dark input[type='range'] { | |
| accent-color: #dfdfdf; | |
| } | |
| .container { | |
| max-width: 730px; | |
| margin: auto; | |
| padding-top: 1.5rem; | |
| } | |
| .details:hover { | |
| text-decoration: underline; | |
| } | |
| .gr-button { | |
| white-space: nowrap; | |
| } | |
| .gr-button:focus { | |
| border-color: rgb(147 197 253 / var(--tw-border-opacity)); | |
| outline: none; | |
| box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); | |
| --tw-border-opacity: 1; | |
| --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); | |
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}
.prompt h4 {
    margin: 1.25em 0 .25em 0;
    font-weight: bold;
    font-size: 115%;
}
.animate-spin {
    animation: spin 1s linear infinite;
}
@keyframes spin {
    from {
        transform: rotate(0deg);
    }
    to {
        transform: rotate(360deg);
    }
}
"""

block = gr.Blocks(css=css)

with block:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
          <div
            style="
              display: inline-flex;
              align-items: center;
              gap: 0.8rem;
              font-size: 1.75rem;
            "
          >
            <h1 style="font-weight: 900; margin-bottom: 7px;">
              Hokkien Translation
            </h1>
          </div>
| <p style="margin-bottom: 10px; font-size: 94%"> | |
| A demo for fairseq speech-to-speech translation models. It supports S2UT and UnitY models for bidirectional Hokkien and English translation. Please select the model and record the input to submit. | |
| </p> | |
| </div> | |
| """ | |
| ) | |
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                audio = gr.Audio(
                    source="microphone", type="filepath", label="Input"
                )
                btn = gr.Button("Submit")
        model = gr.Dropdown(
            choices=[
                "xm_transformer_unity_en-hk",
                "xm_transformer_unity_hk-en",
                "xm_transformer_s2ut_en-hk",
                "xm_transformer_s2ut_hk-en",
            ],
            value="xm_transformer_unity_en-hk",
            type="value",
            label="Model",
        )
        # model = gr.Dropdown(choices=["xm_transformer_unity_en-hk", "xm_transformer_unity_hk-en"], value="xm_transformer_unity_en-hk", type="value", label="Model")
        out = gr.Audio(label="Output")
    btn.click(inference, inputs=[audio, model], outputs=out)
    gr.HTML('''
        <div class="footer">
            <p>Model by <a href="https://ai.facebook.com/" style="text-decoration: underline;" target="_blank">Meta AI</a></p>
        </div>
    ''')

block.launch()
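
# If inference is slow on the Space's hardware, Gradio's request queue can
# be enabled before launching so concurrent requests are processed in turn
# rather than timing out (queue() is part of the Gradio 3.x API used here):
#
#     block.queue().launch()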