Spaces:
Sleeping
Sleeping
clementsan
committed on
Commit
·
8bef1bd
1
Parent(s):
ce888d8
Add third source reference
Browse files
app.py
CHANGED
|
@@ -209,16 +209,18 @@ def conversation(qa_chain, message, history):
|
|
| 209 |
response_sources = response["source_documents"]
|
| 210 |
response_source1 = response_sources[0].page_content.strip()
|
| 211 |
response_source2 = response_sources[1].page_content.strip()
|
|
|
|
| 212 |
# Langchain sources are zero-based
|
| 213 |
response_source1_page = response_sources[0].metadata["page"] + 1
|
| 214 |
response_source2_page = response_sources[1].metadata["page"] + 1
|
|
|
|
| 215 |
# print ('chat response: ', response_answer)
|
| 216 |
# print('DB source', response_sources)
|
| 217 |
|
| 218 |
# Append user message and response to chat history
|
| 219 |
new_history = history + [(message, response_answer)]
|
| 220 |
# return gr.update(value=""), new_history, response_sources[0], response_sources[1]
|
| 221 |
-
return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page
|
| 222 |
|
| 223 |
|
| 224 |
def upload_file(file_obj):
|
|
@@ -285,6 +287,9 @@ def demo():
|
|
| 285 |
with gr.Row():
|
| 286 |
doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
|
| 287 |
source2_page = gr.Number(label="Page", scale=1)
|
|
|
|
|
|
|
|
|
|
| 288 |
with gr.Row():
|
| 289 |
msg = gr.Textbox(placeholder="Type message", container=True)
|
| 290 |
with gr.Row():
|
|
@@ -300,21 +305,21 @@ def demo():
|
|
| 300 |
inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
|
| 301 |
outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0], \
|
| 302 |
inputs=None, \
|
| 303 |
-
outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page], \
|
| 304 |
queue=False)
|
| 305 |
|
| 306 |
# Chatbot events
|
| 307 |
msg.submit(conversation, \
|
| 308 |
inputs=[qa_chain, msg, chatbot], \
|
| 309 |
-
outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page], \
|
| 310 |
queue=False)
|
| 311 |
submit_btn.click(conversation, \
|
| 312 |
inputs=[qa_chain, msg, chatbot], \
|
| 313 |
-
outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page], \
|
| 314 |
queue=False)
|
| 315 |
clear_btn.click(lambda:[None,"",0,"",0], \
|
| 316 |
inputs=None, \
|
| 317 |
-
outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page], \
|
| 318 |
queue=False)
|
| 319 |
demo.queue().launch(debug=True)
|
| 320 |
|
|
|
|
| 209 |
response_sources = response["source_documents"]
|
| 210 |
response_source1 = response_sources[0].page_content.strip()
|
| 211 |
response_source2 = response_sources[1].page_content.strip()
|
| 212 |
+
response_source3 = response_sources[2].page_content.strip()
|
| 213 |
# Langchain sources are zero-based
|
| 214 |
response_source1_page = response_sources[0].metadata["page"] + 1
|
| 215 |
response_source2_page = response_sources[1].metadata["page"] + 1
|
| 216 |
+
response_source3_page = response_sources[2].metadata["page"] + 1
|
| 217 |
# print ('chat response: ', response_answer)
|
| 218 |
# print('DB source', response_sources)
|
| 219 |
|
| 220 |
# Append user message and response to chat history
|
| 221 |
new_history = history + [(message, response_answer)]
|
| 222 |
# return gr.update(value=""), new_history, response_sources[0], response_sources[1]
|
| 223 |
+
return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
|
| 224 |
|
| 225 |
|
| 226 |
def upload_file(file_obj):
|
|
|
|
| 287 |
with gr.Row():
|
| 288 |
doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
|
| 289 |
source2_page = gr.Number(label="Page", scale=1)
|
| 290 |
+
with gr.Row():
|
| 291 |
+
doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
|
| 292 |
+
source3_page = gr.Number(label="Page", scale=1)
|
| 293 |
with gr.Row():
|
| 294 |
msg = gr.Textbox(placeholder="Type message", container=True)
|
| 295 |
with gr.Row():
|
|
|
|
| 305 |
inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
|
| 306 |
outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0], \
|
| 307 |
inputs=None, \
|
| 308 |
+
outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
|
| 309 |
queue=False)
|
| 310 |
|
| 311 |
# Chatbot events
|
| 312 |
msg.submit(conversation, \
|
| 313 |
inputs=[qa_chain, msg, chatbot], \
|
| 314 |
+
outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
|
| 315 |
queue=False)
|
| 316 |
submit_btn.click(conversation, \
|
| 317 |
inputs=[qa_chain, msg, chatbot], \
|
| 318 |
+
outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
|
| 319 |
queue=False)
|
| 320 |
clear_btn.click(lambda:[None,"",0,"",0], \
|
| 321 |
inputs=None, \
|
| 322 |
+
outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
|
| 323 |
queue=False)
|
| 324 |
demo.queue().launch(debug=True)
|
| 325 |
|