Update app.py
app.py CHANGED
```diff
@@ -113,12 +113,12 @@ def vote_last_response(state, vote_type, model_selector):
 
 
 def upvote_last_response(state):
-    vote_last_response(state, "upvote", "
+    vote_last_response(state, "upvote", "Pangea-7b")
     gr.Info("Thank you for your voting!")
     return state
 
 def downvote_last_response(state):
-    vote_last_response(state, "downvote", "
+    vote_last_response(state, "downvote", "Pangea-7b")
     gr.Info("Thank you for your voting!")
     return state
 
```
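Both vote handlers now pass the hard-coded model name "Pangea-7b" to `vote_last_response`. That helper's body sits outside this diff; below is a minimal sketch of what it plausibly does, assuming it appends one JSON record per vote the same way the chat-logging hunk further down appends chat records. `get_conv_log_filename` is the app's own helper (stubbed here so the sketch runs); the `tstamp` field and the exact record shape are assumptions.

```python
import json
import time

def get_conv_log_filename():
    # Stand-in for the app's real helper of the same name.
    return "conv_log.jsonl"

# Hypothetical sketch -- the diff shows only the call sites, not this body.
def vote_last_response(state, vote_type, model_selector):
    with open(get_conv_log_filename(), "a") as fout:
        data = {
            "tstamp": round(time.time(), 4),  # assumed field
            "type": vote_type,                # "upvote" or "downvote"
            "model": model_selector,          # now always "Pangea-7b"
            "state": state,
        }
        fout.write(json.dumps(data) + "\n")  # one JSON record per line
```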
```diff
@@ -420,7 +420,7 @@ def bot(history, temperature, top_p, max_output_tokens):
     with open(get_conv_log_filename(), "a") as fout:
         data = {
             "type": "chat",
-            "model": "
+            "model": "Pangea-7b",
             "state": history,
             "images": all_image_hash,
             "images_path": all_image_path
```
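The hunk above ends mid-dictionary at line 426, so the closing brace and the actual write fall outside the diff context. Assuming the record is serialized as one JSON line per chat turn (the usual pattern for an append-mode log like this), the completed block would look roughly as follows; `log_chat` is a hypothetical wrapper added only to make the fragment self-contained, and it reuses the `get_conv_log_filename` stub from the sketch above.

```python
import json

# Hedged completion: everything after "images_path" is assumed, since the
# hunk's context window closes before the dict is finished and written.
def log_chat(history, all_image_hash, all_image_path):
    with open(get_conv_log_filename(), "a") as fout:
        data = {
            "type": "chat",
            "model": "Pangea-7b",
            "state": history,
            "images": all_image_hash,
            "images_path": all_image_path,
        }
        fout.write(json.dumps(data) + "\n")  # one record per line
```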
```diff
@@ -543,7 +543,7 @@ with gr.Blocks(
     gr.Markdown(learn_more_markdown)
     gr.Markdown(bibtext)
 
-
+    chat_input.submit(
         add_message, [chatbot, chat_input], [chatbot, chat_input]
     ).then(bot, [chatbot, temperature, top_p, max_output_tokens], chatbot, api_name="bot_response").then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
 
```
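The restored line 546 wires the multimodal textbox back up: `submit` appends the user's message, the first `.then` runs the bot reply, and the final `.then` re-enables the input. A self-contained sketch of the same chaining pattern is below; the `add_message` and `bot` bodies are simplified stand-ins for the app's real handlers, and the slider defaults are arbitrary.

```python
import gradio as gr

def add_message(history, message):
    # Append attached files, then the text, and lock the input box
    # while the bot is responding.
    for path in message["files"]:
        history = history + [((path,), None)]
    if message["text"]:
        history = history + [(message["text"], None)]
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history, temperature, top_p, max_output_tokens):
    # Stand-in response; the real app runs the model here.
    if history:
        history[-1] = (history[-1][0], f"echo: {history[-1][0]}")
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"])
    temperature = gr.Slider(0.0, 1.0, value=0.2, label="temperature")
    top_p = gr.Slider(0.0, 1.0, value=0.7, label="top_p")
    max_output_tokens = gr.Slider(16, 1024, value=512, step=16, label="max_output_tokens")

    chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    ).then(
        bot, [chatbot, temperature, top_p, max_output_tokens], chatbot, api_name="bot_response"
    ).then(
        lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input]
    )

demo.launch()
```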
```diff
@@ -591,6 +591,6 @@ if __name__ == "__main__":
     filt_invalid = "cut"
     model_name = get_model_name_from_path(args.model_path)
     tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
-
+    model=model.to(torch.device('cuda'))
     chat_image_num = 0
     demo.launch()
```
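The last hunk moves the freshly loaded model onto the GPU before `demo.launch()`. Note that a hard-coded `torch.device('cuda')` fails at startup on CPU-only hardware; a defensive variant (a sketch, not the committed code) would fall back gracefully:

```python
import torch

# Guarded version of the device move; the commit itself hard-codes 'cuda'.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
```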