Spaces · Build error
Commit 3b8bc2d · 1 Parent(s): f81bd83
Update app.py
app.py CHANGED
@@ -1,55 +1,29 @@
-
-# for Summary Interface:
-# >>>> https://huggingface.co/spaces/khxu/pegasus-text-summarizers/blob/main/app.py
-# Audio Interface
-# >>>> https://huggingface.co/spaces/iSky/Speech-audio-to-text-with-grammar-correction/blob/main/app.py
-# Gramar
-# >>>> https://huggingface.co/deep-learning-analytics/GrammarCorrector/blob/main/README.md
-
-
-import gradio as gr
+import torch
 from transformers import pipeline
+import gradio as gr
+import streamlit as st
+from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
 from gradio.mix import Parallel, Series
 
-# >>>>>>>>>>>>>>>>>>>> Danger Below <<<<<<<<<<<<<<<<<<<<<<
-# Load Interfaces:
-s2t = gr.Interface.load('huggingface/hf-internal-testing/processor_with_lm')
-grammar = gr.Interface.load('huggingface/deep-learning-analytics/GrammarCorrector')
-sum_it = gr.Interface.load('huggingface/SamuelMiller/lil_sum_sum')
-
-# Audio Functions:
-def out(audio):
-    flag = True
-    if audio==None:
-        return "no audio"
-
-    elif flag:
-        a = s2t(audio)
-        #g = grammar(a)
-        #s = sum_it(g) # Summarize Audio with sum_it
-        return a #grammar(a, num_return_sequences=1) # grammar(s), # Grammar Filter
-
-    else:
-        return "something is wrong in the function?"
-
 
-#
-
-
-
-
-inputs= gr.inputs.Audio(source="microphone", type="filepath", label=None, optional=True),
-outputs= 'text'
-)
+# model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
+# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
+# inputs = processor(ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt")
+# generated_ids = model.generate(inputs["input_features"], attention_mask=inputs["attention_mask"])
+# transcription = processor.batch_decode(generated_ids)
 
-# Launch Interface
-iface.launch(enable_queue=True,show_error=True)
 
-
-# gr.inputs.Audio(source="upload", type="filepath", label=None, optional=True),
-# examples=[["Grammar-Correct-Sample.mp3"], ["Grammar-Wrong-Sample.mp3"],],
+desc = "Is this working or what??"
 
-
-
+def summarize(text):
+    summ = gr.Interface.load('huggingface/google/pegasus-large')
+    summary = summ(text)
+    return summary
+iface = gr.Interface(fn=summarize,
+                     theme='huggingface',
+                     title= 'sum_it',
+                     description= desc,
+                     inputs= "text",
+                     outputs= 'textbox')
+iface.launch(inline = False)
 
-#gr.Interface(speech_to_text, inputs="mic", outputs=gr.Textbox(label="Predicted text", lines=4))
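
For reference, a minimal self-contained sketch of the summarization app the new app.py builds. It runs the model locally with a transformers summarization pipeline instead of going through gr.Interface.load; the model name google/pegasus-large comes from the diff, while the generation settings and description string are illustrative assumptions.

import gradio as gr
from transformers import pipeline

# Sketch only: local-pipeline variant of the sum_it app above.
# google/pegasus-large is taken from the diff; the generation settings below are assumptions.
summarizer = pipeline("summarization", model="google/pegasus-large")

def summarize(text):
    # The pipeline returns a list of dicts such as [{"summary_text": "..."}].
    result = summarizer(text, max_length=128, min_length=16, do_sample=False)
    return result[0]["summary_text"]

iface = gr.Interface(fn=summarize,
                     title="sum_it",
                     description="Summarize text with Pegasus.",
                     inputs="text",
                     outputs="textbox")
iface.launch()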
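
The commented-out Speech2Text block in the new file references a `ds` variable that is never defined; it matches the usage example from the transformers documentation, where `ds` is a small LibriSpeech sample. Below is a sketch of that block in runnable form; the dataset choice and the skip_special_tokens flag are assumptions, not part of this commit.

from datasets import load_dataset
from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration

# Sketch only: fills in the `ds` that the commented-out block above leaves undefined.
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

# Tiny 16 kHz LibriSpeech sample used in the transformers examples; any mono audio array works.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

inputs = processor(ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt")
generated_ids = model.generate(inputs["input_features"], attention_mask=inputs["attention_mask"])
transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
print(transcription)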