Update app.py
app.py CHANGED
@@ -152,7 +152,7 @@ def add_witty_humor_buttons():
     if col7[0].button("More Funny Rhymes 🎙️"):
         StreamLLMChatResponse(descriptions["More Funny Rhymes 🎙️"])
 
-def addDocumentHTML5(result):
+def SpeechSynthesis(result):
     documentHTML5='''
     <!DOCTYPE html>
     <html>
@@ -180,7 +180,7 @@ def addDocumentHTML5(result):
     '''
 
     components.html(documentHTML5, width=1280, height=1024)
-    return result
+    #return result
 
 
 # 3. Stream Llama Response
@@ -220,7 +220,7 @@ def StreamLLMChatResponse(prompt):
 
             except:
                 st.write('Stream llm issue')
-
+        SpeechSynthesis(result)
         return result
     except:
         st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
@@ -675,10 +675,8 @@ def main():
     if len(file_contents) > 0:
         if next_action=='open':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
-            #addDocumentHTML5(file_contents)
         if next_action=='md':
             st.markdown(file_contents)
-            #addDocumentHTML5(file_contents)
         if next_action=='search':
             file_content_area = st.text_area("File Contents:", file_contents, height=500)
             st.write('Reasoning with your inputs...')
@@ -687,9 +685,7 @@ def main():
             response = StreamLLMChatResponse(file_contents)
             filename = generate_filename(user_prompt, ".md")
             create_file(filename, file_contents, response, should_save)
-
-            #addDocumentHTML5(file_contents)
-            addDocumentHTML5(response)
+            SpeechSynthesis(response)
 
             # old - gpt
             #response = chat_with_model(user_prompt, file_contents, model_choice)
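A hypothetical standalone way to exercise the helper outside the Space, assuming SpeechSynthesis behaves as sketched above (demo.py is an invented filename; run with streamlit run demo.py). Doing text-to-speech in the browser keeps the A10 container focused on inference, which matters when KEDA is scaling it from zero.

import streamlit as st
# Assumes the SpeechSynthesis helper sketched above is defined in this module.

st.title("Read It Aloud 🎙️")
text = st.text_area("Text to speak", "Llama model response goes here.")
if st.button("Speak"):
    # The browser, not the server, synthesizes the audio.
    SpeechSynthesis(text)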