ryanDing26 committed
Commit fbb09e2 · 1 Parent(s): 055024d
Files changed (1)
  1. app.py +688 -144
app.py CHANGED
@@ -1,3 +1,639 @@
 import os
 import re
 import shutil
@@ -6,6 +642,7 @@ import gradio as gr
 from pathlib import Path
 from histopath.agent import A1
 from dotenv import load_dotenv
+from typing import List, Dict, Any, Optional, Tuple
 
 # Load environment variables
 load_dotenv()
@@ -157,8 +794,19 @@ def format_message_for_display(parsed_output):
     return "\n\n".join(msg_parts)
 
 
-def process_agent_response(prompt, uploaded_file, chatbot_history):
-    """Process the agent response and update chatbot - AGGRESSIVE FIX: Minimal yields."""
+# CRITICAL FIX: Wrap the main processing function to simplify type signatures
+def process_agent_response_wrapper(prompt: str, uploaded_file, chatbot_history: List[Dict[str, str]]):
+    """
+    Wrapper function with simplified type hints to avoid Gradio API introspection issues.
+
+    Args:
+        prompt: User query string
+        uploaded_file: Uploaded file object (or None)
+        chatbot_history: List of message dictionaries
+
+    Returns:
+        Tuple of (chatbot_history, images, data_files, preview_image, preview_file, status)
+    """
     global agent
 
     if agent is None:
@@ -223,56 +871,39 @@ def process_agent_response(prompt, uploaded_file, chatbot_history):
         for step in agent.go_stream(prompt):
             step_count += 1
             output = step.get("output", "")
-
-            if output:
-                collected_outputs.append(output)
+            collected_outputs.append(output)
 
-        # CRITICAL FIX: Process ALL collected outputs at once
+        # Now process all collected outputs
         for output in collected_outputs:
+            if not output or output.strip() == "":
+                continue
+
             parsed = parse_agent_output(output)
-            formatted_message = format_message_for_display(parsed)
+            formatted_msg = format_message_for_display(parsed)
 
-            # Update or append to chatbot history
-            if chatbot_history and chatbot_history[-1]["role"] == "assistant":
-                # Update the last assistant message
-                chatbot_history[-1]["content"] = formatted_message
-            else:
-                # Add new assistant message
+            if formatted_msg and formatted_msg.strip():
                 chatbot_history.append({
                     "role": "assistant",
-                    "content": formatted_message
+                    "content": formatted_msg
                 })
 
-        # CRITICAL FIX: Check files only ONCE after all processing
+        # CRITICAL: Single final yield with all results
         images, data = check_for_output_files()
+        final_status = f"✅ Complete! Processed {step_count} steps"
 
-        status_msg = f"✅ Complete ({step_count} steps)"
-        if images:
-            status_msg += f" | {len(images)} image(s)"
-        if data:
-            status_msg += f" | {len(data)} data file(s)"
-
-        # CRITICAL FIX: Final single yield with all results
-        yield chatbot_history, images, data, None, None, status_msg
+        yield chatbot_history, images, data, None, None, final_status
 
     except Exception as e:
         error_trace = traceback.format_exc()
-        error_msg = f"❌ **Error:** {str(e)}\n\n<details>\n<summary>Stack Trace</summary>\n\n```\n{error_trace}\n```\n</details>"
-
+        error_msg = f"❌ **Error occurred:**\n\n```\n{str(e)}\n\n{error_trace}\n```"
         chatbot_history.append({
             "role": "assistant",
            "content": error_msg
         })
-
         yield chatbot_history, None, None, None, None, f"❌ Error: {str(e)}"
 
 
-def clear_chat():
-    """Clear the chat history and outputs."""
-    return [], None, None, None, None, "Ready"
-
-
-def validate_passcode(input_passcode):
+def validate_passcode(input_passcode: str) -> Tuple:
     """Validate the passcode and initialize the agent."""
     global agent
 
@@ -282,129 +913,40 @@ def validate_passcode(input_passcode):
             agent = A1(
                 path="./data",
                 llm="claude-sonnet-4-20250514",
+                source="Anthropic",
                 use_tool_retriever=True,
                 timeout_seconds=600
             )
+
             return (
                 gr.update(visible=False),  # Hide passcode section
                 gr.update(visible=True),   # Show main interface
-                "✅ Access granted! Agent initialized successfully."
+                "✅ Authentication successful! Agent initialized."
             )
         except Exception as e:
-            error_msg = f"❌ Failed to initialize agent: {str(e)}"
             return (
-                gr.update(visible=True),
-                gr.update(visible=False),
-                error_msg
+                gr.update(visible=True),   # Keep passcode section visible
+                gr.update(visible=False),  # Keep main interface hidden
+                f"❌ Error initializing agent: {str(e)}"
             )
     else:
         return (
-            gr.update(visible=True),
-            gr.update(visible=False),
+            gr.update(visible=True),   # Keep passcode section visible
+            gr.update(visible=False),  # Keep main interface hidden
             "❌ Invalid passcode. Please try again."
         )
-# batched streaming instead
-def process_agent_response_batched(prompt, uploaded_file, chatbot_history, batch_size=5):
-    """Process agent response with BATCHED updates (every N steps)."""
-    global agent
-
-    if agent is None:
-        chatbot_history.append({
-            "role": "assistant",
-            "content": "⚠️ Please enter the passcode first to initialize the agent."
-        })
-        yield chatbot_history, None, None, None, None, "⚠️ Agent not initialized"
-        return
-
-    if not prompt.strip() and uploaded_file is None:
-        chatbot_history.append({
-            "role": "assistant",
-            "content": "⚠️ Please provide a prompt or upload a file."
-        })
-        yield chatbot_history, None, None, None, None, "⚠️ No input provided"
-        return
-
-    # Handle file upload
-    file_path = None
-    file_info = ""
-    if uploaded_file is not None:
-        try:
-            data_dir = Path("./data")
-            data_dir.mkdir(exist_ok=True)
-
-            file_name = Path(uploaded_file.name).name
-            file_path = data_dir / file_name
-            shutil.copy(uploaded_file.name, file_path)
-
-            file_info = f"\n\n📎 **Uploaded file:** `{file_path}`\n"
-
-            if prompt.strip():
-                prompt = f"{prompt}\n\nUploaded file path: {file_path}"
-            else:
-                prompt = f"I have uploaded a file at: {file_path}. Please analyze it."
-
-        except Exception as e:
-            error_msg = f"❌ Error handling file upload: {str(e)}"
-            chatbot_history.append({
-                "role": "assistant",
-                "content": error_msg
-            })
-            yield chatbot_history, None, None, None, None, error_msg
-            return
-
-    # Add user message to chat
-    user_message = prompt if not file_info else f"{prompt}{file_info}"
-    chatbot_history.append({"role": "user", "content": user_message})
-    yield chatbot_history, None, None, None, None, "🔄 Processing..."
-
-    try:
-        # Stream with batching
-        step_count = 0
-        batch_count = 0
-
-        for step in agent.go_stream(prompt):
-            step_count += 1
-            output = step.get("output", "")
-
-            if output:
-                parsed = parse_agent_output(output)
-                formatted_message = format_message_for_display(parsed)
-
-                # Update chatbot history
-                if chatbot_history and chatbot_history[-1]["role"] == "assistant":
-                    chatbot_history[-1]["content"] = formatted_message
-                else:
-                    chatbot_history.append({
-                        "role": "assistant",
-                        "content": formatted_message
-                    })
-
-                # Only yield every batch_size steps
-                if step_count % batch_size == 0:
-                    batch_count += 1
-                    yield chatbot_history, None, None, None, None, f"🔄 Step {step_count}..."
-
-        # Final yield with files
-        images, data = check_for_output_files()
-
-        status_msg = f"✅ Complete ({step_count} steps)"
-        if images:
-            status_msg += f" | {len(images)} image(s)"
-        if data:
-            status_msg += f" | {len(data)} data file(s)"
-
-        yield chatbot_history, images, data, None, None, status_msg
-
-    except Exception as e:
-        error_trace = traceback.format_exc()
-        error_msg = f"❌ **Error:** {str(e)}\n\n<details>\n<summary>Stack Trace</summary>\n\n```\n{error_trace}\n```\n</details>"
-
-        chatbot_history.append({
-            "role": "assistant",
-            "content": error_msg
-        })
-
-        yield chatbot_history, None, None, None, None, f"❌ Error: {str(e)}"
+
+
+def clear_chat():
+    """Clear the chat and reset outputs."""
+    # Clean output directory
+    output_dir = Path("./output")
+    if output_dir.exists():
+        for file in output_dir.iterdir():
+            if file.is_file():
+                file.unlink()
+
+    return [], None, None, None, None, "Chat cleared"
 
 
 # Custom theme
@@ -603,7 +1145,7 @@ with gr.Blocks(title="HistoPath Agent", theme=custom_theme, css="""
     )
 
     submit_btn.click(
-        fn=process_agent_response,
+        fn=process_agent_response_wrapper,
         inputs=[prompt_input, file_upload, chatbot],
         outputs=[chatbot, output_gallery, data_files, input_image_preview, input_file_preview, status_text]
     )
@@ -615,7 +1157,7 @@ with gr.Blocks(title="HistoPath Agent", theme=custom_theme, css="""
 
     # Allow enter key to submit
     prompt_input.submit(
-        fn=process_agent_response,
+        fn=process_agent_response_wrapper,
        inputs=[prompt_input, file_upload, chatbot],
        outputs=[chatbot, output_gallery, data_files, input_image_preview, input_file_preview, status_text]
     )
@@ -632,5 +1174,7 @@ if __name__ == "__main__":
     print("Starting server...")
     print("=" * 60)
 
-    # Launch the app
-    demo.launch(show_api=False)
+    # Launch the app with API documentation completely disabled
+    demo.launch(
+        show_api=False,
+    )
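The hunks above replace the old per-step streaming with a buffer-then-single-yield generator. A minimal sketch of that pattern, not taken from app.py (the `fake_stream` helper below is a hypothetical stand-in for `agent.go_stream`, and all names are illustrative only):

```python
# Sketch of the "collect everything, then yield once" generator pattern.
# fake_stream() is a hypothetical stand-in for agent.go_stream(); it is not part of app.py.
def fake_stream(prompt):
    for i in range(3):
        yield {"output": f"step {i}: {prompt}"}

def respond(prompt, history):
    """Generator handler: Gradio re-renders its outputs on every yield, so
    buffering all steps and emitting one final update avoids flooding the UI."""
    collected = []
    for step in fake_stream(prompt):
        collected.append(step.get("output", ""))   # buffer instead of yielding per step

    for output in collected:                       # format the buffered steps at the end
        if output.strip():
            history.append({"role": "assistant", "content": output})

    yield history, f"✅ Complete ({len(collected)} steps)"   # single final yield
```

The remaining added lines of this hunk (new lines 1-636, the old implementation commented out) follow below.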
1
+ # import os
2
+ # import re
3
+ # import shutil
4
+ # import traceback
5
+ # import gradio as gr
6
+ # from pathlib import Path
7
+ # from histopath.agent import A1
8
+ # from dotenv import load_dotenv
9
+
10
+ # # Load environment variables
11
+ # load_dotenv()
12
+
13
+ # # Get passcode from environment
14
+ # PASSCODE = os.getenv("GRADIO_PASSWORD")
15
+
16
+ # # Initialize agent (will be created after passcode validation)
17
+ # agent = None
18
+
19
+
20
+ # def check_for_output_files():
21
+ # """Check for all files in the output directory and return their paths."""
22
+ # output_dir = Path("./output")
23
+ # if not output_dir.exists():
24
+ # return [], []
25
+
26
+ # image_extensions = {".png", ".jpg", ".jpeg", ".svg", ".tif", ".tiff"}
27
+ # data_extensions = {".csv", ".txt", ".json", ".npy"}
28
+
29
+ # images = []
30
+ # data_files = []
31
+
32
+ # for file in output_dir.iterdir():
33
+ # if file.is_file():
34
+ # if file.suffix.lower() in image_extensions:
35
+ # images.append(str(file))
36
+ # elif file.suffix.lower() in data_extensions:
37
+ # data_files.append(str(file))
38
+
39
+ # return images, data_files
40
+
41
+
42
+ # def preview_uploaded_file(uploaded_file):
43
+ # """Preview the uploaded file - show image or file info."""
44
+ # if uploaded_file is None:
45
+ # return None, None, "No file uploaded"
46
+
47
+ # file_path = Path(uploaded_file.name)
48
+ # file_ext = file_path.suffix.lower()
49
+
50
+ # image_extensions = {".png", ".jpg", ".jpeg", ".svg", ".tif", ".tiff", ".svs"}
51
+
52
+ # if file_ext in image_extensions:
53
+ # # Show image preview
54
+ # return uploaded_file.name, None, f"πŸ“· Previewing: {file_path.name}"
55
+ # else:
56
+ # # Show file info
57
+ # file_size = Path(uploaded_file.name).stat().st_size / 1024 # KB
58
+ # return None, uploaded_file.name, f"πŸ“„ File: {file_path.name} ({file_size:.1f} KB)"
59
+
60
+
61
+ # def parse_agent_output(output):
62
+ # """Parse agent output to extract code blocks, observations, and regular text."""
63
+ # # Strip out the message divider bars
64
+ # output = re.sub(r'={30,}\s*(Human|Ai)\s+Message\s*={30,}', '', output)
65
+ # output = output.strip()
66
+
67
+ # parsed = {
68
+ # "type": "text",
69
+ # "content": output,
70
+ # "code": None,
71
+ # "observation": None,
72
+ # "thinking": None
73
+ # }
74
+
75
+ # # Check for code execution block
76
+ # execute_match = re.search(r'<execute>(.*?)</execute>', output, re.DOTALL)
77
+ # if execute_match:
78
+ # parsed["type"] = "code"
79
+ # parsed["code"] = execute_match.group(1).strip()
80
+ # # Extract text before the code block (thinking/explanation)
81
+ # text_before = output[:execute_match.start()].strip()
82
+ # # Remove any think tags but keep the content
83
+ # text_before = re.sub(r'<think>(.*?)</think>', r'\1', text_before, flags=re.DOTALL)
84
+ # text_before = re.sub(r'={30,}.*?={30,}', '', text_before).strip()
85
+ # parsed["thinking"] = text_before if text_before else None
86
+ # return parsed
87
+
88
+ # # Check for observation block
89
+ # observation_match = re.search(r'<observation>(.*?)</observation>', output, re.DOTALL)
90
+ # if observation_match:
91
+ # parsed["type"] = "observation"
92
+ # parsed["observation"] = observation_match.group(1).strip()
93
+ # # Extract text before observation if any
94
+ # text_before = output[:observation_match.start()].strip()
95
+ # text_before = re.sub(r'<think>(.*?)</think>', r'\1', text_before, flags=re.DOTALL)
96
+ # text_before = re.sub(r'={30,}.*?={30,}', '', text_before).strip()
97
+ # parsed["thinking"] = text_before if text_before else None
98
+ # return parsed
99
+
100
+ # # Check for solution block
101
+ # solution_match = re.search(r'<solution>(.*?)</solution>', output, re.DOTALL)
102
+ # if solution_match:
103
+ # parsed["type"] = "solution"
104
+ # parsed["content"] = solution_match.group(1).strip()
105
+ # # Get thinking before solution
106
+ # text_before = output[:solution_match.start()].strip()
107
+ # text_before = re.sub(r'<think>(.*?)</think>', r'\1', text_before, flags=re.DOTALL)
108
+ # text_before = re.sub(r'={30,}.*?={30,}', '', text_before).strip()
109
+ # parsed["thinking"] = text_before if text_before else None
110
+ # return parsed
111
+
112
+ # # Clean up any remaining tags for display
113
+ # cleaned = re.sub(r'<think>(.*?)</think>', r'\1', output, flags=re.DOTALL)
114
+ # cleaned = re.sub(r'={30,}.*?={30,}', '', cleaned).strip()
115
+ # parsed["content"] = cleaned
116
+
117
+ # return parsed
118
+
119
+
120
+ # def format_message_for_display(parsed_output):
121
+ # """Format parsed output into a readable message for the chatbot."""
122
+ # msg_parts = []
123
+
124
+ # # Add thinking/explanation text first if present
125
+ # if parsed_output.get("thinking"):
126
+ # msg_parts.append(parsed_output["thinking"])
127
+
128
+ # if parsed_output["type"] == "code":
129
+ # # Add separator if there was thinking text
130
+ # if parsed_output.get("thinking"):
131
+ # msg_parts.append("\n---\n")
132
+
133
+ # msg_parts.append("### πŸ’» Executing Code\n")
134
+ # msg_parts.append(f"```python\n{parsed_output['code']}\n```")
135
+
136
+ # elif parsed_output["type"] == "observation":
137
+ # # Add separator if there was thinking text
138
+ # if parsed_output.get("thinking"):
139
+ # msg_parts.append("\n---\n")
140
+
141
+ # msg_parts.append("### πŸ“Š Observation\n")
142
+ # msg_parts.append(f"```\n{parsed_output['observation']}\n```")
143
+
144
+ # elif parsed_output["type"] == "solution":
145
+ # # Add separator if there was thinking text
146
+ # if parsed_output.get("thinking"):
147
+ # msg_parts.append("\n---\n")
148
+
149
+ # msg_parts.append("### βœ… Solution\n")
150
+ # msg_parts.append(parsed_output['content'])
151
+
152
+ # else:
153
+ # # For regular text, just add the content if thinking wasn't already set
154
+ # if not parsed_output.get("thinking"):
155
+ # msg_parts.append(parsed_output["content"])
156
+
157
+ # return "\n\n".join(msg_parts)
158
+
159
+
160
+ # def process_agent_response(prompt, uploaded_file, chatbot_history):
161
+ # """Process the agent response and update chatbot - AGGRESSIVE FIX: Minimal yields."""
162
+ # global agent
163
+
164
+ # if agent is None:
165
+ # chatbot_history.append({
166
+ # "role": "assistant",
167
+ # "content": "⚠️ Please enter the passcode first to initialize the agent."
168
+ # })
169
+ # yield chatbot_history, None, None, None, None, "⚠️ Agent not initialized"
170
+ # return
171
+
172
+ # if not prompt.strip() and uploaded_file is None:
173
+ # chatbot_history.append({
174
+ # "role": "assistant",
175
+ # "content": "⚠️ Please provide a prompt or upload a file."
176
+ # })
177
+ # yield chatbot_history, None, None, None, None, "⚠️ No input provided"
178
+ # return
179
+
180
+ # # Handle file upload
181
+ # file_path = None
182
+ # file_info = ""
183
+ # if uploaded_file is not None:
184
+ # try:
185
+ # # Create data directory if it doesn't exist
186
+ # data_dir = Path("./data")
187
+ # data_dir.mkdir(exist_ok=True)
188
+
189
+ # # Copy uploaded file to data directory
190
+ # file_name = Path(uploaded_file.name).name
191
+ # file_path = data_dir / file_name
192
+ # shutil.copy(uploaded_file.name, file_path)
193
+
194
+ # file_info = f"\n\nπŸ“Ž **Uploaded file:** `{file_path}`\n"
195
+
196
+ # # Augment prompt with file path
197
+ # if prompt.strip():
198
+ # prompt = f"{prompt}\n\nUploaded file path: {file_path}"
199
+ # else:
200
+ # prompt = f"I have uploaded a file at: {file_path}. Please analyze it."
201
+
202
+ # except Exception as e:
203
+ # error_msg = f"❌ Error handling file upload: {str(e)}"
204
+ # chatbot_history.append({
205
+ # "role": "assistant",
206
+ # "content": error_msg
207
+ # })
208
+ # yield chatbot_history, None, None, None, None, error_msg
209
+ # return
210
+
211
+ # # Add user message to chat
212
+ # user_message = prompt if not file_info else f"{prompt}{file_info}"
213
+ # chatbot_history.append({"role": "user", "content": user_message})
214
+
215
+ # # CRITICAL FIX: Only yield once at the start to show user message
216
+ # yield chatbot_history, None, None, None, None, "πŸ”„ Processing..."
217
+
218
+ # try:
219
+ # # CRITICAL FIX: Collect ALL steps without yielding
220
+ # step_count = 0
221
+ # collected_outputs = []
222
+
223
+ # for step in agent.go_stream(prompt):
224
+ # step_count += 1
225
+ # output = step.get("output", "")
226
+
227
+ # if output:
228
+ # collected_outputs.append(output)
229
+
230
+ # # CRITICAL FIX: Process ALL collected outputs at once
231
+ # for output in collected_outputs:
232
+ # parsed = parse_agent_output(output)
233
+ # formatted_message = format_message_for_display(parsed)
234
+
235
+ # # Update or append to chatbot history
236
+ # if chatbot_history and chatbot_history[-1]["role"] == "assistant":
237
+ # # Update the last assistant message
238
+ # chatbot_history[-1]["content"] = formatted_message
239
+ # else:
240
+ # # Add new assistant message
241
+ # chatbot_history.append({
242
+ # "role": "assistant",
243
+ # "content": formatted_message
244
+ # })
245
+
246
+ # # CRITICAL FIX: Check files only ONCE after all processing
247
+ # images, data = check_for_output_files()
248
+
249
+ # status_msg = f"βœ… Complete ({step_count} steps)"
250
+ # if images:
251
+ # status_msg += f" | {len(images)} image(s)"
252
+ # if data:
253
+ # status_msg += f" | {len(data)} data file(s)"
254
+
255
+ # # CRITICAL FIX: Final single yield with all results
256
+ # yield chatbot_history, images, data, None, None, status_msg
257
+
258
+ # except Exception as e:
259
+ # error_trace = traceback.format_exc()
260
+ # error_msg = f"❌ **Error:** {str(e)}\n\n<details>\n<summary>Stack Trace</summary>\n\n```\n{error_trace}\n```\n</details>"
261
+
262
+ # chatbot_history.append({
263
+ # "role": "assistant",
264
+ # "content": error_msg
265
+ # })
266
+
267
+ # yield chatbot_history, None, None, None, None, f"❌ Error: {str(e)}"
268
+
269
+
270
+ # def clear_chat():
271
+ # """Clear the chat history and outputs."""
272
+ # return [], None, None, None, None, "Ready"
273
+
274
+
275
+ # def validate_passcode(input_passcode):
276
+ # """Validate the passcode and initialize the agent."""
277
+ # global agent
278
+
279
+ # if input_passcode == PASSCODE:
280
+ # try:
281
+ # # Initialize the agent
282
+ # agent = A1(
283
+ # path="./data",
284
+ # llm="claude-sonnet-4-20250514",
285
+ # use_tool_retriever=True,
286
+ # timeout_seconds=600
287
+ # )
288
+ # return (
289
+ # gr.update(visible=False), # Hide passcode section
290
+ # gr.update(visible=True), # Show main interface
291
+ # "βœ… Access granted! Agent initialized successfully."
292
+ # )
293
+ # except Exception as e:
294
+ # error_msg = f"❌ Failed to initialize agent: {str(e)}"
295
+ # return (
296
+ # gr.update(visible=True),
297
+ # gr.update(visible=False),
298
+ # error_msg
299
+ # )
300
+ # else:
301
+ # return (
302
+ # gr.update(visible=True),
303
+ # gr.update(visible=False),
304
+ # "❌ Invalid passcode. Please try again."
305
+ # )
306
+ # # batched streaming instead
307
+ # def process_agent_response_batched(prompt, uploaded_file, chatbot_history, batch_size=5):
308
+ # """Process agent response with BATCHED updates (every N steps)."""
309
+ # global agent
310
+
311
+ # if agent is None:
312
+ # chatbot_history.append({
313
+ # "role": "assistant",
314
+ # "content": "⚠️ Please enter the passcode first to initialize the agent."
315
+ # })
316
+ # yield chatbot_history, None, None, None, None, "⚠️ Agent not initialized"
317
+ # return
318
+
319
+ # if not prompt.strip() and uploaded_file is None:
320
+ # chatbot_history.append({
321
+ # "role": "assistant",
322
+ # "content": "⚠️ Please provide a prompt or upload a file."
323
+ # })
324
+ # yield chatbot_history, None, None, None, None, "⚠️ No input provided"
325
+ # return
326
+
327
+ # # Handle file upload
328
+ # file_path = None
329
+ # file_info = ""
330
+ # if uploaded_file is not None:
331
+ # try:
332
+ # data_dir = Path("./data")
333
+ # data_dir.mkdir(exist_ok=True)
334
+
335
+ # file_name = Path(uploaded_file.name).name
336
+ # file_path = data_dir / file_name
337
+ # shutil.copy(uploaded_file.name, file_path)
338
+
339
+ # file_info = f"\n\nπŸ“Ž **Uploaded file:** `{file_path}`\n"
340
+
341
+ # if prompt.strip():
342
+ # prompt = f"{prompt}\n\nUploaded file path: {file_path}"
343
+ # else:
344
+ # prompt = f"I have uploaded a file at: {file_path}. Please analyze it."
345
+
346
+ # except Exception as e:
347
+ # error_msg = f"❌ Error handling file upload: {str(e)}"
348
+ # chatbot_history.append({
349
+ # "role": "assistant",
350
+ # "content": error_msg
351
+ # })
352
+ # yield chatbot_history, None, None, None, None, error_msg
353
+ # return
354
+
355
+ # # Add user message to chat
356
+ # user_message = prompt if not file_info else f"{prompt}{file_info}"
357
+ # chatbot_history.append({"role": "user", "content": user_message})
358
+ # yield chatbot_history, None, None, None, None, "πŸ”„ Processing..."
359
+
360
+ # try:
361
+ # # Stream with batching
362
+ # step_count = 0
363
+ # batch_count = 0
364
+
365
+ # for step in agent.go_stream(prompt):
366
+ # step_count += 1
367
+ # output = step.get("output", "")
368
+
369
+ # if output:
370
+ # parsed = parse_agent_output(output)
371
+ # formatted_message = format_message_for_display(parsed)
372
+
373
+ # # Update chatbot history
374
+ # if chatbot_history and chatbot_history[-1]["role"] == "assistant":
375
+ # chatbot_history[-1]["content"] = formatted_message
376
+ # else:
377
+ # chatbot_history.append({
378
+ # "role": "assistant",
379
+ # "content": formatted_message
380
+ # })
381
+
382
+ # # Only yield every batch_size steps
383
+ # if step_count % batch_size == 0:
384
+ # batch_count += 1
385
+ # yield chatbot_history, None, None, None, None, f"πŸ”„ Step {step_count}..."
386
+
387
+ # # Final yield with files
388
+ # images, data = check_for_output_files()
389
+
390
+ # status_msg = f"βœ… Complete ({step_count} steps)"
391
+ # if images:
392
+ # status_msg += f" | {len(images)} image(s)"
393
+ # if data:
394
+ # status_msg += f" | {len(data)} data file(s)"
395
+
396
+ # yield chatbot_history, images, data, None, None, status_msg
397
+
398
+ # except Exception as e:
399
+ # error_trace = traceback.format_exc()
400
+ # error_msg = f"❌ **Error:** {str(e)}\n\n<details>\n<summary>Stack Trace</summary>\n\n```\n{error_trace}\n```\n</details>"
401
+
402
+ # chatbot_history.append({
403
+ # "role": "assistant",
404
+ # "content": error_msg
405
+ # })
406
+
407
+ # yield chatbot_history, None, None, None, None, f"❌ Error: {str(e)}"
408
+
409
+
410
+ # # Custom theme
411
+ # custom_theme = gr.themes.Soft(
412
+ # primary_hue="indigo",
413
+ # secondary_hue="purple",
414
+ # neutral_hue="slate",
415
+ # font=["Inter", "system-ui", "sans-serif"],
416
+ # text_size="md",
417
+ # ).set(
418
+ # button_primary_background_fill="*primary_500",
419
+ # button_primary_background_fill_hover="*primary_600",
420
+ # block_label_text_weight="600",
421
+ # block_title_text_weight="600",
422
+ # )
423
+
424
+ # with gr.Blocks(title="HistoPath Agent", theme=custom_theme, css="""
425
+ # .gradio-container {
426
+ # max-width: 100% !important;
427
+ # }
428
+ # .main-header {
429
+ # text-align: center;
430
+ # padding: 1.5rem 0;
431
+ # background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
432
+ # color: white;
433
+ # border-radius: 8px;
434
+ # margin-bottom: 1.5rem;
435
+ # }
436
+ # .main-header h1 {
437
+ # margin: 0;
438
+ # font-size: 2.2rem;
439
+ # font-weight: 700;
440
+ # }
441
+ # .main-header p {
442
+ # margin: 0.5rem 0 0 0;
443
+ # opacity: 0.95;
444
+ # font-size: 1.1rem;
445
+ # }
446
+ # .file-upload-box .wrap {
447
+ # min-width: 0 !important;
448
+ # }
449
+ # .file-upload-box .file-name {
450
+ # word-break: break-word !important;
451
+ # white-space: normal !important;
452
+ # overflow-wrap: break-word !important;
453
+ # }
454
+ # .tab-nav {
455
+ # margin-bottom: 0.5rem;
456
+ # }
457
+ # /* Better styling for code and observation blocks */
458
+ # .message.bot pre {
459
+ # background-color: #f6f8fa !important;
460
+ # border: 1px solid #d0d7de !important;
461
+ # border-radius: 6px !important;
462
+ # padding: 12px !important;
463
+ # margin: 8px 0 !important;
464
+ # }
465
+ # .message.bot h3 {
466
+ # margin-top: 12px !important;
467
+ # margin-bottom: 8px !important;
468
+ # font-weight: 600 !important;
469
+ # }
470
+ # .message.bot hr {
471
+ # border: none !important;
472
+ # border-top: 2px solid #e1e4e8 !important;
473
+ # margin: 16px 0 !important;
474
+ # }
475
+ # """) as demo:
476
+
477
+ # # Header
478
+ # gr.HTML("""
479
+ # <div class="main-header">
480
+ # <h1>πŸ”¬ HistoPath Agent</h1>
481
+ # <p>AI-Powered Histopathology Analysis Assistant</p>
482
+ # </div>
483
+ # """)
484
+
485
+ # # Passcode section
486
+ # with gr.Group(visible=True) as passcode_section:
487
+ # gr.Markdown("### πŸ” Authentication Required")
488
+
489
+ # with gr.Row():
490
+ # passcode_input = gr.Textbox(
491
+ # label="Passcode",
492
+ # type="password",
493
+ # placeholder="Enter your passcode...",
494
+ # scale=3
495
+ # )
496
+ # passcode_btn = gr.Button("πŸ”“ Unlock", variant="primary", scale=1, size="lg")
497
+
498
+ # passcode_status = gr.Textbox(
499
+ # label="Status",
500
+ # interactive=False,
501
+ # lines=2
502
+ # )
503
+
504
+ # # Main interface (hidden initially)
505
+ # with gr.Group(visible=False) as main_interface:
506
+ # with gr.Row(equal_height=True):
507
+ # # Left column - Chat interface
508
+ # with gr.Column(scale=3):
509
+ # chatbot = gr.Chatbot(
510
+ # label="πŸ’¬ Conversation",
511
+ # type="messages",
512
+ # height=550,
513
+ # show_label=True,
514
+ # render_markdown=True,
515
+ # )
516
+
517
+ # # Input area
518
+ # with gr.Row():
519
+ # with gr.Column(scale=7):
520
+ # prompt_input = gr.Textbox(
521
+ # label="Your Query",
522
+ # placeholder="E.g., 'Caption the uploaded whole slide image' or 'Segment cells using instanseg model'",
523
+ # lines=2,
524
+ # max_lines=5,
525
+ # show_label=False,
526
+ # )
527
+ # with gr.Column(scale=3):
528
+ # file_upload = gr.File(
529
+ # label="πŸ“Ž Upload File",
530
+ # file_types=[".svs", ".png", ".jpg", ".jpeg", ".tif", ".tiff", ".csv", ".txt", ".json", ".npy"],
531
+ # height=75,
532
+ # elem_classes="file-upload-box",
533
+ # )
534
+
535
+ # with gr.Row():
536
+ # submit_btn = gr.Button("πŸš€ Submit", variant="primary", scale=3, size="lg")
537
+ # clear_btn = gr.Button("πŸ—‘οΈ Clear", scale=1, size="lg", variant="secondary")
538
+
539
+ # status_text = gr.Textbox(
540
+ # label="Status",
541
+ # interactive=False,
542
+ # value="Ready",
543
+ # show_label=False,
544
+ # container=False,
545
+ # )
546
+
547
+ # # Right column - Outputs
548
+ # with gr.Column(scale=2):
549
+ # with gr.Tabs():
550
+ # with gr.Tab("πŸ“₯ Input"):
551
+ # with gr.Column():
552
+ # input_image_preview = gr.Image(
553
+ # label="Input Image",
554
+ # height=400,
555
+ # show_label=False,
556
+ # container=True,
557
+ # )
558
+ # input_file_preview = gr.File(
559
+ # label="Input File",
560
+ # interactive=False,
561
+ # height=100,
562
+ # show_label=False,
563
+ # container=True,
564
+ # )
565
+ # input_status = gr.Textbox(
566
+ # value="Upload a file to preview",
567
+ # show_label=False,
568
+ # interactive=False,
569
+ # container=False,
570
+ # )
571
+
572
+ # with gr.Tab("πŸ–ΌοΈ Images"):
573
+ # output_gallery = gr.Gallery(
574
+ # label="Generated Visualizations",
575
+ # columns=1,
576
+ # height=600,
577
+ # object_fit="contain",
578
+ # show_label=False,
579
+ # show_download_button=True,
580
+ # )
581
+
582
+ # with gr.Tab("πŸ“„ Data"):
583
+ # data_files = gr.File(
584
+ # label="Generated Data Files",
585
+ # file_count="multiple",
586
+ # interactive=False,
587
+ # height=600,
588
+ # show_label=False,
589
+ # )
590
+
591
+ # # Event handlers
592
+ # passcode_btn.click(
593
+ # fn=validate_passcode,
594
+ # inputs=[passcode_input],
595
+ # outputs=[passcode_section, main_interface, passcode_status]
596
+ # )
597
+
598
+ # # File upload preview
599
+ # file_upload.change(
600
+ # fn=preview_uploaded_file,
601
+ # inputs=[file_upload],
602
+ # outputs=[input_image_preview, input_file_preview, input_status]
603
+ # )
604
+
605
+ # submit_btn.click(
606
+ # fn=process_agent_response,
607
+ # inputs=[prompt_input, file_upload, chatbot],
608
+ # outputs=[chatbot, output_gallery, data_files, input_image_preview, input_file_preview, status_text]
609
+ # )
610
+
611
+ # clear_btn.click(
612
+ # fn=clear_chat,
613
+ # outputs=[chatbot, output_gallery, data_files, input_image_preview, input_file_preview, status_text]
614
+ # )
615
+
616
+ # # Allow enter key to submit
617
+ # prompt_input.submit(
618
+ # fn=process_agent_response,
619
+ # inputs=[prompt_input, file_upload, chatbot],
620
+ # outputs=[chatbot, output_gallery, data_files, input_image_preview, input_file_preview, status_text]
621
+ # )
622
+
623
+
624
+ # if __name__ == "__main__":
625
+ # # Create necessary directories
626
+ # Path("./data").mkdir(exist_ok=True)
627
+ # Path("./output").mkdir(exist_ok=True)
628
+
629
+ # print("=" * 60)
630
+ # print("πŸ”¬ HistoPath Agent - Gradio Interface")
631
+ # print("=" * 60)
632
+ # print("Starting server...")
633
+ # print("=" * 60)
634
+
635
+ # # Launch the app
636
+ # demo.launch(show_api=False)
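For context on how the re-wired event handlers behave, here is a self-contained, illustrative-only Gradio skeleton (component and function names are hypothetical, not taken from app.py) showing a generator callback shared by a button click and the Enter key, launched with `show_api=False` as in the commit:

```python
import gradio as gr

def answer(prompt, history):
    """Generator callback: each yield updates the bound output components."""
    history = history + [{"role": "user", "content": prompt}]
    yield history, "🔄 Processing..."
    history = history + [{"role": "assistant", "content": f"Echo: {prompt}"}]
    yield history, "✅ Done"

with gr.Blocks() as demo:
    chat = gr.Chatbot(type="messages")          # messages-format history: list of role/content dicts
    box = gr.Textbox(label="Your Query")
    status = gr.Textbox(value="Ready", interactive=False)
    btn = gr.Button("Submit")
    # The click handler and the Enter-key handler share one function,
    # mirroring submit_btn.click / prompt_input.submit in app.py.
    btn.click(fn=answer, inputs=[box, chat], outputs=[chat, status])
    box.submit(fn=answer, inputs=[box, chat], outputs=[chat, status])

if __name__ == "__main__":
    demo.launch(show_api=False)   # hide the auto-generated API page
```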