Adding EleutherAI's own API
duplex.py CHANGED
@@ -48,12 +48,12 @@ tts_model_name = "facebook/tts_transformer-es-css10"
 speak_es = gr.Interface.load(f"huggingface/{tts_model_name}", api_key=HF_AUTH_TOKEN)
 transcribe_es = lambda input_file: asr_es(input_file, chunk_length_s=5, stride_length_s=1)["text"]
 def generate_es(text, **kwargs):
-    # max_length=100, top_k=100, top_p=50, temperature=0.95, do_sample=True, do_clean=True
+    # text="Promtp", max_length=100, top_k=100, top_p=50, temperature=0.95, do_sample=True, do_clean=True
     api_uri = "https://hf.space/embed/bertin-project/bertin-gpt-j-6B/+/api/predict/"
-    response = requests.post(api_uri, data=json.dumps({"data": [text,
+    response = requests.post(api_uri, data=json.dumps({"data": [text, kwargs["max_length"], 100, 50, 0.95, True, True]}))
     if response.ok:
         if DEBUG:
-            print(response.json())
+            print("Spanish response >", response.json())
         return response.json()["data"][0]
     else:
         return ""
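The Spanish generator now goes through the BERTIN Space's Gradio API: inputs travel as a positional "data" list and the generated text comes back as the first element of the response's "data" list. A minimal standalone sketch of that call (the helper name and default values are illustrative; the parameter order follows the comment in the diff):

    import json
    import requests

    BERTIN_API = "https://hf.space/embed/bertin-project/bertin-gpt-j-6B/+/api/predict/"

    def query_bertin(text, max_length=100, top_k=100, top_p=50, temperature=0.95):
        # Inputs are positional: text, max_length, top_k, top_p, temperature, do_sample, do_clean
        payload = {"data": [text, max_length, top_k, top_p, temperature, True, True]}
        response = requests.post(BERTIN_API, data=json.dumps(payload))
        # The Space returns its outputs in a "data" list; the first entry is the generated text
        return response.json()["data"][0] if response.ok else ""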
@@ -71,17 +71,22 @@ asr_en = pipeline(
 tts_model_name = "facebook/fastspeech2-en-ljspeech"
 speak_en = gr.Interface.load(f"huggingface/{tts_model_name}", api_key=HF_AUTH_TOKEN)
 transcribe_en = lambda input_file: asr_en(input_file, chunk_length_s=5, stride_length_s=1)["text"]
-generate_iface = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B", api_key=HF_AUTH_TOKEN)
+# generate_iface = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B", api_key=HF_AUTH_TOKEN)
 
 empty_audio = 'empty.flac'
 sf.write(empty_audio, [], 16000)
 deuncase = gr.Interface.load("huggingface/pere/DeUnCaser", api_key=HF_AUTH_TOKEN)
 
 def generate_en(text, **kwargs):
-    [previous generate_en body (old lines 81-84), not shown in the diff view]
+    api_uri = "https://api.eleuther.ai/completion"
+    #--data-raw '{"context":"Promtp","top_p":0.9,"temp":0.8,"response_length":128,"remove_input":true}'
+    response = requests.post(api_uri, data=json.dumps({"context": text, "top_p": 0.9, "temp": 0.8, "response_length": kwargs["max_length"], "remove_input": True}))
+    if response.ok:
+        if DEBUG:
+            print("English response >", response.json())
+        return response.json()[0]["generated_text"].lstrip()
+    else:
+        return ""
 
 
 def select_lang(lang):
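generate_en now calls EleutherAI's public completion endpoint directly instead of loading the gpt-j-6B Interface. A standalone sketch of that request, mirroring the curl payload quoted in the diff (the helper name and the default values are illustrative):

    import json
    import requests

    ELEUTHER_API = "https://api.eleuther.ai/completion"

    def query_eleuther(text, response_length=128, top_p=0.9, temp=0.8):
        payload = {
            "context": text,
            "top_p": top_p,
            "temp": temp,
            "response_length": response_length,
            "remove_input": True,  # return only the completion, not the prompt
        }
        response = requests.post(ELEUTHER_API, data=json.dumps(payload))
        # The endpoint answers with a list of completions, each carrying "generated_text"
        return response.json()[0]["generated_text"].lstrip() if response.ok else ""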
@@ -135,7 +140,7 @@ def chat_with_gpt(lang, agent, user, context, audio_in, history):
     # agent = AGENT
     # user = USER
     generation_kwargs = {
-        "max_length":
+        "max_length": 50,
         # "top_k": top_k,
         # "top_p": top_p,
         # "temperature": temperature,
@@ -159,14 +164,18 @@ def chat_with_gpt(lang, agent, user, context, audio_in, history):
             break
     context += history_context
     for _ in range(5):
-        response = generate(f"{context}\n\n{user}: {message}.\n", **generation_kwargs)
+        response = generate(f"{context}\n\n{user}: {message}.\n", context_length=context_length, **generation_kwargs)
         if DEBUG:
             print("\n-----" + response + "-----\n")
-        response = response.split("\n")[-1]
-        if agent in response and response.split(agent)[-1]:
-            response = response.split(agent)[-1]
-        if user in response and response.split(user)[-1]:
-            response = response.split(user)[-1]
+        # response = response.split("\n")[-1]
+        # if agent in response and response.split(agent)[-1]:
+        #     response = response.split(agent)[-1]
+        # if user in response and response.split(user)[-1]:
+        #     response = response.split(user)[-1]
+        # Take the first response
+        response = [
+            r for r in response.split(f"{AGENT}:") if r.strip()
+        ][0].split(USER)[0].replace(f"{AGENT}:", "\n").strip()
         if response and response[0] in string.punctuation:
             response = response[1:].strip()
         if response.strip().startswith(f"{user}: {message}"):
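The old line-by-line splitting on agent and user is commented out in favour of taking the first agent turn from the raw generation and dropping anything the model produced for the user. A quick illustration of that expression (the AGENT and USER values here are placeholders; the real ones are defined elsewhere in duplex.py):

    AGENT = "BERTIN"   # placeholder value
    USER = "PERSON"    # placeholder value

    raw = "BERTIN: Hola, ¿cómo estás?\nPERSON: Muy bien.\nBERTIN: Me alegro."

    # Keep only the first non-empty agent turn, cut at the first user turn
    response = [
        r for r in raw.split(f"{AGENT}:") if r.strip()
    ][0].split(USER)[0].replace(f"{AGENT}:", "\n").strip()

    print(response)  # -> Hola, ¿cómo estás?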