Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -4,10 +4,11 @@ from t2a import text_to_audio
|
|
| 4 |
import joblib
|
| 5 |
from sentence_transformers import SentenceTransformer
|
| 6 |
import numpy as np
|
|
|
|
| 7 |
|
| 8 |
reg = joblib.load('text_reg.joblib')
|
| 9 |
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
|
| 10 |
-
finetune =
|
| 11 |
|
| 12 |
def get_note_text(prompt):
|
| 13 |
prompt = prompt + " ->"
|
|
@@ -24,24 +25,22 @@ def get_note_text(prompt):
|
|
| 24 |
)
|
| 25 |
return response.choices[0].text.strip()
|
| 26 |
|
| 27 |
-
def get_drummer_output(prompt
|
| 28 |
-
openai.api_key =
|
| 29 |
note_text = get_note_text(prompt)
|
| 30 |
# note_text = note_text + " " + note_text
|
| 31 |
-
# note_text = "k n k n k n k n s n h n k n s n k n k n k n k n k n k n h n k n n"
|
| 32 |
prompt_enc = model.encode([prompt])
|
| 33 |
bpm = int(reg.predict(prompt_enc)[0]) + 20
|
| 34 |
print(bpm, "bpm", "notes are", note_text)
|
| 35 |
audio = text_to_audio(note_text, bpm)
|
| 36 |
-
# audio to numpy
|
| 37 |
audio = np.array(audio.get_array_of_samples(), dtype=np.float32)
|
| 38 |
return (96000, audio)
|
| 39 |
|
| 40 |
iface = gr.Interface(
|
| 41 |
fn=get_drummer_output,
|
| 42 |
-
inputs=
|
| 43 |
outputs="audio",
|
| 44 |
-
title='
|
| 45 |
-
description=
|
| 46 |
)
|
| 47 |
iface.launch()
|
|
|
|
| 4 |
import joblib
|
| 5 |
from sentence_transformers import SentenceTransformer
|
| 6 |
import numpy as np
|
| 7 |
+
import os
|
| 8 |
|
| 9 |
# One-time startup work for the Space.
# Load the prompt->BPM regressor and the sentence encoder exactly once,
# so per-request handlers only run inference.
reg = joblib.load('text_reg.joblib')
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# Identifier of the fine-tuned OpenAI completion model, injected via a
# Space secret — presumably used by get_note_text; TODO confirm.
finetune = os.environ['finetune']
|
| 12 |
|
| 13 |
def get_note_text(prompt):
|
| 14 |
prompt = prompt + " ->"
|
|
|
|
| 25 |
)
|
| 26 |
return response.choices[0].text.strip()
|
| 27 |
|
| 28 |
+
def get_drummer_output(prompt):
    """Turn a free-text prompt into a drum beat.

    Returns a ``(96000, np.ndarray[float32])`` tuple — the numpy-audio
    format Gradio's "audio" output component accepts.  The 96 kHz rate is
    hard-coded and presumably matches what text_to_audio renders at —
    TODO confirm against t2a.
    """
    # The API key is re-read from the environment (Space secret) on every call.
    openai.api_key = os.environ['key']

    # Ask the completion model for a drum-note string (e.g. "k n s n h ...").
    notes = get_note_text(prompt)

    # Tempo comes from a regression over the prompt's sentence embedding;
    # the +20 offset nudges the prediction upward (rationale not visible here).
    embedding = model.encode([prompt])
    tempo = int(reg.predict(embedding)[0]) + 20
    print(f"{tempo} bpm notes are {notes}")

    # Render the note string to audio, then hand Gradio the raw samples
    # as a float32 numpy array.
    segment = text_to_audio(notes, tempo)
    samples = np.array(segment.get_array_of_samples(), dtype=np.float32)
    return (96000, samples)
|
| 38 |
|
| 39 |
# Gradio UI: a single text prompt in, a generated drum-beat clip out.
iface = gr.Interface(
    fn=get_drummer_output,
    title='Autodrummer',
    description="Stable Diffusion for drum beats. Type in a genre and some descriptors (e.g., 'hiphop groove 808') to the prompt box and get a drum beat in that genre",
    inputs="text",
    outputs="audio",
)
iface.launch()
|