Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -227,11 +227,19 @@ def StreamLLMChatResponse(prompt):
|
|
| 227 |
except:
|
| 228 |
st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
|
| 229 |
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
st.
|
|
|
|
| 234 |
return response.json()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 235 |
def get_output(prompt):
|
| 236 |
return query({"inputs": prompt})
|
| 237 |
|
|
@@ -518,8 +526,8 @@ def get_zip_download_link(zip_file):
|
|
| 518 |
#API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
|
| 519 |
# Latest small - 1/26
|
| 520 |
API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small"
|
| 521 |
-
MODEL2 = "openai/whisper-small.en"
|
| 522 |
-
MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
|
| 523 |
|
| 524 |
HF_KEY = st.secrets['HF_KEY']
|
| 525 |
headers = {
|
|
@@ -528,12 +536,7 @@ headers = {
|
|
| 528 |
}
|
| 529 |
|
| 530 |
#@st.cache_resource
|
| 531 |
-
|
| 532 |
-
with open(filename, "rb") as f:
|
| 533 |
-
data = f.read
|
| 534 |
-
st.write('Posting request to model ' + API_URL_IE)
|
| 535 |
-
response = requests.post(API_URL_IE, headers=headers, data=data)
|
| 536 |
-
return response.json()
|
| 537 |
|
| 538 |
def generate_filename(prompt, file_type):
|
| 539 |
central = pytz.timezone('US/Central')
|
|
|
|
| 227 |
except:
|
| 228 |
st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
|
| 229 |
|
| 230 |
+
def query(filename):
    """Transcribe an audio file via the HF Inference API (whisper-small).

    Reads the file's raw bytes and POSTs them to the module-level
    ``API_URL_IE`` endpoint using the module-level ``headers`` (HF auth
    token). Returns the decoded JSON response from the API.

    NOTE(review): elsewhere in this file, get_output() calls
    query({"inputs": prompt}) — a dict, not a filename. That call will
    fail against this signature; confirm which contract is intended.
    """
    # Bug fix: the original `data = f.read` stored the *bound method*
    # instead of calling it, so the request body was never the file's
    # bytes. Call read() inside the `with` so the handle is still open.
    with open(filename, "rb") as f:
        data = f.read()
    st.write('Posting request to model ' + API_URL_IE)
    response = requests.post(API_URL_IE, headers=headers, data=data)
    return response.json()
|
| 236 |
+
|
| 237 |
+
# 4. Run query with payload
|
| 238 |
+
#def query(payload):
|
| 239 |
+
# response = requests.post(API_URL, headers=headers, json=payload)
|
| 240 |
+
# st.markdown(response.json())
|
| 241 |
+
# return response.json()
|
| 242 |
+
|
| 243 |
def get_output(prompt):
    # Thin wrapper: forwards the prompt to the sibling query() helper as
    # a JSON-style payload.
    # NOTE(review): query() as (re)defined in this commit takes a
    # *filename* and open()s it, so passing this dict will raise a
    # TypeError at runtime. This looks like a leftover caller of the
    # older payload-based query() (still visible, commented out, in the
    # diff) — verify which query() signature is intended before relying
    # on this path.
    return query({"inputs": prompt})
|
| 245 |
|
|
|
|
| 526 |
#API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
|
| 527 |
# Latest small - 1/26
|
| 528 |
API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small"
|
| 529 |
+
#MODEL2 = "openai/whisper-small.en"
|
| 530 |
+
#MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
|
| 531 |
|
| 532 |
HF_KEY = st.secrets['HF_KEY']
|
| 533 |
headers = {
|
|
|
|
| 536 |
}
|
| 537 |
|
| 538 |
#@st.cache_resource
|
| 539 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 540 |
|
| 541 |
def generate_filename(prompt, file_type):
|
| 542 |
central = pytz.timezone('US/Central')
|