import os
from datetime import datetime

import gradio as gr
from openai import OpenAI

# 🔹 Configure your agent (a DigitalOcean GenAI agent endpoint)
agent_endpoint = "https://q77iuwf7ncfemoonbzon2iyd.agents.do-ai.run/api/v1/"
# Prefer an environment variable over a hardcoded credential. "AGENT_ACCESS_KEY"
# is a suggested variable name; the original key is kept as a fallback so the
# script runs unchanged.
agent_access_key = os.getenv("AGENT_ACCESS_KEY", "CzIwmTIDFNWRRIHvxVNzKWztq8rn5S5w")
client = OpenAI(base_url=agent_endpoint, api_key=agent_access_key)
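
# Quick connectivity check (a sketch, not executed here): the agent speaks the
# OpenAI chat-completions protocol, so a one-off non-streaming call would be:
#   resp = client.chat.completions.create(
#       model="n/a", messages=[{"role": "user", "content": "ping"}]
#   )
#   print(resp.choices[0].message.content)
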
# Global flag to control streaming
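# NOTE: module-level state is shared by every session; fine for a single-user
# demo, but a multi-user deployment would want per-session state (e.g. gr.State).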
stop_flag = {"stop": False}
# Store reasoning context for export
REASONING_CONTEXT = ""

def stop_streaming():
    """Sets the stop flag to True to interrupt the streaming response."""
    stop_flag["stop"] = True
def export_chat(history):
    """Exports the chat history to a formatted text file."""
    if not history:
        return None
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"chat_history_{timestamp}.txt"
    formatted_chat = ""
    for user_msg, bot_msg in history:
        formatted_chat += f"You: {user_msg}\n"
        formatted_chat += f"Bot: {bot_msg}\n\n"
    # Add the reasoning context to the export if it exists
    if REASONING_CONTEXT:
        formatted_chat += "\n" + "=" * 20 + "\n"
        formatted_chat += "Last Reasoning Context:\n"
        formatted_chat += "=" * 20 + "\n\n"
        formatted_chat += REASONING_CONTEXT
    with open(filename, "w", encoding="utf-8") as f:
        f.write(formatted_chat)
    return filename
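
# Example (with a hypothetical history): export_chat([["What is the 2030
# target?", "A 20% renewable share."]]) writes a file such as
# "chat_history_20240101_120000.txt" and returns its path for gr.File.
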
def policy_chat(message, history):
    """
    Single-stream chat with a real-time thinking indicator and a stop button.
    """
    global REASONING_CONTEXT
    # Reset stop flag at the beginning of a new request
    stop_flag["stop"] = False
    # 1. Show the thinking indicator and stop button, and disable send
    yield {
        chatbot: history,
        thinking_indicator: gr.update(visible=True),
        reasoning_accordion: gr.update(visible=False, value=""),
        stop_btn: gr.update(visible=True),
        send_btn: gr.update(interactive=False),
    }
    # Add the user message and an empty assistant slot to the history
    history = history + [[message, ""]]
    yield {
        chatbot: history,
        thinking_indicator: gr.update(visible=True),
        reasoning_accordion: gr.update(visible=False, value=""),
        stop_btn: gr.update(visible=True),
        send_btn: gr.update(interactive=False),
    }
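    # NOTE: yielding a dict keyed by component updates several components per
    # step; every key must appear in the event's outputs list (chat_outputs below).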
    try:
        # 2. Simulated reasoning context (in a real app this would come from
        #    the retrieval logic of the DigitalOcean agent)
        REASONING_CONTEXT = (
            "### Retrieved Context\n\n"
            "1. **Document:** `policy_document_A.pdf` (Page 17)\n"
            "   - **Content:** 'The primary objective for renewable energy is to increase its share in the national grid by 20% by the year 2030.'\n"
            "2. **Document:** `strategy_paper_B.pdf` (Page 5)\n"
            "   - **Content:** 'Energy efficiency measures will be enforced through new building codes and industrial standards.'\n"
        )
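        # Since the request below sends extra_body={"include_retrieval_info": True},
        # a real implementation could build REASONING_CONTEXT from the agent's
        # retrieval metadata instead of this hard-coded placeholder.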
        # Show the reasoning panel and hide the thinking indicator
        yield {
            chatbot: history,
            thinking_indicator: gr.update(visible=False),
            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
            stop_btn: gr.update(visible=True),
            send_btn: gr.update(interactive=False),
        }
        # Create a streaming chat completion via the DigitalOcean agent
        stream = client.chat.completions.create(
            model="n/a",  # DigitalOcean agent handles routing
            messages=[
                {"role": "system", "content": "The data must be returned verbatim. Please be quite detailed and include all information. You are new to the analysis of policy documents, hence you need to be objective in retrieving information, and it is not expected that you will analyse and interpret the information."},
                # Replay prior turns, flattening each (user, assistant) pair
                # into two messages; history[:-1] excludes the current exchange
                *[m for u, b in history[:-1]
                  for m in ({"role": "user", "content": u},
                            {"role": "assistant", "content": b})],
                {"role": "user", "content": message},
            ],
            extra_body={"include_retrieval_info": True},
            stream=True,
        )
        # Stream response content in real time
        response_text = ""
        for chunk in stream:
            # Stop mid-stream if the user pressed the stop button
            if stop_flag["stop"]:
                response_text += "\n\n⛔ **Streaming stopped by user.**"
                history[-1][1] = response_text
                yield {
                    chatbot: history,
                    thinking_indicator: gr.update(visible=False),
                    reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
                    stop_btn: gr.update(visible=True),
                    send_btn: gr.update(interactive=False),
                }
                break  # exit the loop; the finally block restores the buttons
            delta = chunk.choices[0].delta
            if delta and delta.content:
                response_text += delta.content
                # Update the last message in history with the streamed content
                history[-1][1] = response_text
                yield {
                    chatbot: history,
                    thinking_indicator: gr.update(visible=False),
                    reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
                    stop_btn: gr.update(visible=True),
                    send_btn: gr.update(interactive=False),
                }
    except Exception as e:
        # Surface errors in the assistant message
        history[-1][1] = f"⚠️ Error: {str(e)}"
        yield {
            chatbot: history,
            thinking_indicator: gr.update(visible=False),
            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
            stop_btn: gr.update(visible=False),
            send_btn: gr.update(interactive=True),
        }
    finally:
        # Hide the stop button and re-enable send once streaming completes or stops
        yield {
            chatbot: history,
            thinking_indicator: gr.update(visible=False),
            reasoning_accordion: gr.update(visible=True, value=REASONING_CONTEXT),
            stop_btn: gr.update(visible=False),
            send_btn: gr.update(interactive=True),
        }

with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="gray")) as demo:
    gr.Markdown("# 🤖 Policy-Agent Chatbot\nAsk me about policies. I'll stream responses and show my reasoning.")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=600, label="Chat Window", show_copy_button=True)
            thinking_indicator = gr.Markdown("🤔 Thinking...", visible=False)
            reasoning_accordion = gr.Markdown(
                "",
                visible=False,
                label="Visualise Reasoning (Retrieved Context)",
            )
        with gr.Column(scale=1):
            export_btn = gr.Button("Export Chat")
            export_file = gr.File(label="Download Chat History", interactive=False)
    with gr.Row():
        msg = gr.Textbox(
            placeholder="Type your question and press Enter or click Send...",
            scale=4,
            show_label=False,
        )
        send_btn = gr.Button("Send", scale=1)
    with gr.Row():
        stop_btn = gr.Button("⛔ Stop Streaming", visible=False)
        clear_btn = gr.Button("Clear Chat")
    # Define event handlers
    def on_submit(message, history):
        yield from policy_chat(message, history)

    # All components the chat generator updates, in one outputs list
    chat_outputs = [chatbot, thinking_indicator, reasoning_accordion, stop_btn, send_btn]
    # Wire up components
    msg.submit(on_submit, [msg, chatbot], chat_outputs)
    send_btn.click(on_submit, [msg, chatbot], chat_outputs)
    # After submission, clear the textbox
    msg.submit(lambda: "", None, msg, queue=False)
    send_btn.click(lambda: "", None, msg, queue=False)
    stop_btn.click(stop_streaming, None, None, queue=False)
    clear_btn.click(lambda: ([], "", None), None, [chatbot, reasoning_accordion, export_file], queue=False)
    export_btn.click(export_chat, chatbot, export_file)

if __name__ == "__main__":
    demo.launch(debug=True, height=800)
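
# Note: `height` in launch() sizes the inline iframe when the demo is embedded
# in a notebook; for a plain `python` run, open the local URL that Gradio
# prints to the console instead.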