AshenH committed on
Commit da25b2a · verified · 1 Parent(s): 68c51bb

enhanced app.py: added logging, input validation, per-step error handling, and a richer Gradio UI

Files changed (1)
  1. app.py +390 -102
app.py CHANGED
@@ -1,8 +1,10 @@
  # space/app.py
  import os
  import json
  import gradio as gr
  import pandas as pd

  from tools.sql_tool import SQLTool
  from tools.predict_tool import PredictTool
@@ -14,26 +16,48 @@ from tools.ts_forecast_tool import TimeseriesForecastTool
  from utils.tracing import Tracer
  from utils.config import AppConfig

- # Optional tiny CPU LLM for planning (can be disabled by not setting ORCHESTRATOR_MODEL)
  llm = None
  LLM_ID = os.getenv("ORCHESTRATOR_MODEL")
  if LLM_ID:
      try:
          from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
          _tok = AutoTokenizer.from_pretrained(LLM_ID)
          _mdl = AutoModelForCausalLM.from_pretrained(LLM_ID)
          llm = pipeline("text-generation", model=_mdl, tokenizer=_tok, max_new_tokens=512)
-     except Exception:
          llm = None

- cfg = AppConfig.from_env()
- tracer = Tracer.from_env()
-
- sql_tool = SQLTool(cfg, tracer)
- predict_tool = PredictTool(cfg, tracer)
- explain_tool = ExplainTool(cfg, tracer)
- report_tool = ReportTool(cfg, tracer)
- ts_tool = TimeseriesForecastTool(cfg, tracer)  # Granite wrapper

  SYSTEM_PROMPT = (
      "You are an analytical assistant for tabular data. "
@@ -42,7 +66,40 @@ SYSTEM_PROMPT = (
      "Always disclose the steps taken."
  )

- def plan_actions(message: str):
      if llm is not None:
          prompt = (
              f"{SYSTEM_PROMPT}\nUser: {message}\n"
@@ -52,104 +109,335 @@ def plan_actions(message: str):
          out = llm(prompt)[0]["generated_text"]
          last = out.split("\n")[-1].strip()
          obj = json.loads(last) if last.startswith("{") else json.loads(out[out.rfind("{"):])
          if isinstance(obj, dict) and "steps" in obj:
-                 return obj
-     except Exception:
-         pass
-     # Fallback heuristic:
      m = message.lower()
      steps = []
-     if any(k in m for k in ["show", "average", "count", "trend", "top", "sql", "query", "kpi"]): steps.append("sql")
-     if any(k in m for k in ["predict", "score", "risk", "propensity", "probability"]): steps.append("predict")
-     if any(k in m for k in ["why", "explain", "shap", "feature", "attribution"]): steps.append("explain")
-     if any(k in m for k in ["report", "download", "pdf", "summary"]): steps.append("report")
-     if any(k in m for k in ["forecast", "next", "horizon", "granite"]): steps.append("forecast")
-     if not steps: steps = ["sql"]
-     return {"steps": steps, "rationale": "Rule-based plan."}
-
- def run_agent(message: str, hitl_decision: str = "Approve", reviewer_note: str = ""):
-     tracer.trace_event("user_message", {"message": message})
-     plan = plan_actions(message)
-     tracer.trace_event("plan", plan)
-
-     sql_df = None
-     predict_df = None
-     explain_imgs = {}
-     artifacts = {}
-     ts_forecast_df = None
-
-     if "sql" in plan["steps"]:
-         sql_df = sql_tool.run(message)
-         artifacts["sql_rows"] = int(len(sql_df)) if isinstance(sql_df, pd.DataFrame) else 0
-
-     if "predict" in plan["steps"]:
-         predict_df = predict_tool.run(sql_df)
-
-     ts_df = None
-     if sql_df is not None:
-         try:
-             ts_df = build_timeseries(sql_df)
-         except Exception:
-             ts_df = None
-
-     if "forecast" in plan["steps"] and ts_df is not None:
-         # Expect 'portfolio_value' after preprocessing
-         # Use the combined series, e.g., sum over instruments by timestamp
-         agg = ts_df.groupby("timestamp", as_index=True)["portfolio_value"].sum().sort_index()
          try:
-             ts_forecast_df = ts_tool.zeroshot_forecast(agg, horizon=96)
          except Exception as e:
-             # Surface a readable error in response
-             ts_forecast_df = pd.DataFrame({"error": [str(e)]})
-
-     if "explain" in plan["steps"]:
-         explain_imgs = explain_tool.run(predict_df or sql_df)
-
-     report_link = None
-     if "report" in plan["steps"]:
-         # Add forecast preview if available
-         forecast_preview = ts_forecast_df.head(50) if isinstance(ts_forecast_df, pd.DataFrame) else None
-         report_link = report_tool.render_and_save(
-             user_query=message,
-             sql_preview=sql_df.head(50) if isinstance(sql_df, pd.DataFrame) else None,
-             predict_preview=predict_df.head(50) if isinstance(predict_df, pd.DataFrame) else forecast_preview,
-             explain_images=explain_imgs,
-             plan=plan,
-         )

-     tracer.trace_event("hitl", {
-         "message": message,
-         "decision": hitl_decision,
-         "reviewer_note": reviewer_note,
-         "artifacts": artifacts,
-         "plan": plan,
-     })
-
-     # Compose response
-     response = f"**Plan:** {plan['steps']}\n**Rationale:** {plan['rationale']}\n"
-     if isinstance(sql_df, pd.DataFrame): response += f"\n**SQL rows:** {len(sql_df)}"
-     if isinstance(predict_df, pd.DataFrame): response += f"\n**Predictions rows:** {len(predict_df)}"
-     if isinstance(ts_forecast_df, pd.DataFrame) and "forecast" in ts_forecast_df.columns:
-         response += f"\n**Forecast horizon:** {len(ts_forecast_df)}"
-     if report_link: response += f"\n**Report:** {report_link}"
-     if tracer.trace_url: response += f"\n**Trace:** {tracer.trace_url}"
-
-     # Prefer to show forecast if present, else predictions, else raw query
-     preview_df = ts_forecast_df if isinstance(ts_forecast_df, pd.DataFrame) and not ts_forecast_df.empty else \
-         (predict_df if isinstance(predict_df, pd.DataFrame) and not predict_df.empty else sql_df)
-     return response, (preview_df if isinstance(preview_df, pd.DataFrame) else pd.DataFrame())
-
- with gr.Blocks() as demo:
-     gr.Markdown("# Tabular Agentic XAI (Free-Tier)")
      with gr.Row():
-         msg = gr.Textbox(label="Ask your question")
      with gr.Row():
-         hitl = gr.Radio(["Approve", "Needs Changes"], value="Approve", label="Human Review")
-         note = gr.Textbox(label="Reviewer note (optional)")
-     out_md = gr.Markdown()
-     out_df = gr.Dataframe(interactive=False)
-     ask = gr.Button("Run")
-     ask.click(run_agent, inputs=[msg, hitl, note], outputs=[out_md, out_df])

  if __name__ == "__main__":
-     demo.launch()
  # space/app.py
  import os
  import json
+ import logging
  import gradio as gr
  import pandas as pd
+ from typing import Optional, Tuple

  from tools.sql_tool import SQLTool
  from tools.predict_tool import PredictTool

  from utils.tracing import Tracer
  from utils.config import AppConfig

+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Constants
+ MAX_RESPONSE_LENGTH = 10000
+ MAX_FORECAST_HORIZON = 365
+ DEFAULT_FORECAST_HORIZON = 96
+
+ # Optional LLM for planning
  llm = None
  LLM_ID = os.getenv("ORCHESTRATOR_MODEL")
  if LLM_ID:
      try:
          from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+         logger.info(f"Loading orchestrator model: {LLM_ID}")
          _tok = AutoTokenizer.from_pretrained(LLM_ID)
          _mdl = AutoModelForCausalLM.from_pretrained(LLM_ID)
          llm = pipeline("text-generation", model=_mdl, tokenizer=_tok, max_new_tokens=512)
+         logger.info("Orchestrator model loaded successfully")
+     except Exception as e:
+         logger.warning(f"Failed to load orchestrator model: {e}. Using fallback planner.")
          llm = None

+ # Initialize configuration and tools
+ try:
+     cfg = AppConfig.from_env()
+     tracer = Tracer.from_env()
+
+     sql_tool = SQLTool(cfg, tracer)
+     predict_tool = PredictTool(cfg, tracer)
+     explain_tool = ExplainTool(cfg, tracer)
+     report_tool = ReportTool(cfg, tracer)
+     ts_tool = TimeseriesForecastTool(cfg, tracer)
+
+     logger.info("All tools initialized successfully")
+ except Exception as e:
+     logger.error(f"Failed to initialize application: {e}")
+     raise

  SYSTEM_PROMPT = (
      "You are an analytical assistant for tabular data. "

      "Always disclose the steps taken."
  )

+
+ def validate_message(message: str) -> Tuple[bool, str]:
+     """Validate user input message."""
+     if not message or not message.strip():
+         return False, "Please enter a valid question."
+
+     if len(message) > MAX_RESPONSE_LENGTH:
+         return False, f"Message too long. Please limit to {MAX_RESPONSE_LENGTH} characters."
+
+     # Basic SQL injection pattern detection
+     suspicious_patterns = [
+         r';\s*drop\s+table',
+         r';\s*delete\s+from',
+         r';\s*truncate',
+         r'union\s+select.*from',
+         r'exec\s*\(',
+         r'execute\s*\('
+     ]
+
+     import re
+     msg_lower = message.lower()
+     for pattern in suspicious_patterns:
+         if re.search(pattern, msg_lower):
+             logger.warning(f"Suspicious SQL pattern detected: {pattern}")
+             return False, "Query contains potentially unsafe patterns. Please rephrase."
+
+     return True, ""
+
+
+ def plan_actions(message: str) -> dict:
+     """
+     Determine which tools to execute based on the user message.
+     Uses LLM if available, otherwise falls back to heuristics.
+     """
      if llm is not None:
          prompt = (
              f"{SYSTEM_PROMPT}\nUser: {message}\n"

          out = llm(prompt)[0]["generated_text"]
          last = out.split("\n")[-1].strip()
          obj = json.loads(last) if last.startswith("{") else json.loads(out[out.rfind("{"):])
+
          if isinstance(obj, dict) and "steps" in obj:
+                 # Validate steps
+                 valid_steps = {'sql', 'predict', 'explain', 'report', 'forecast'}
+                 obj["steps"] = [s for s in obj["steps"] if s in valid_steps]
+                 if obj["steps"]:
+                     logger.info(f"LLM plan: {obj['steps']}")
+                     return obj
+     except json.JSONDecodeError as e:
+         logger.warning(f"Failed to parse LLM output as JSON: {e}")
+     except Exception as e:
+         logger.warning(f"LLM planning failed: {e}")
+
+     # Fallback heuristic planning
      m = message.lower()
      steps = []
+
+     # SQL keywords
+     if any(k in m for k in ["show", "average", "count", "trend", "top", "sql", "query",
+                             "kpi", "data", "retrieve", "fetch", "list", "view"]):
+         steps.append("sql")
+
+     # Prediction keywords
+     if any(k in m for k in ["predict", "score", "risk", "propensity", "probability",
+                             "classification", "regression"]):
+         steps.append("predict")
+         if "sql" not in steps:
+             steps.insert(0, "sql")  # Need data first
+
+     # Explanation keywords
+     if any(k in m for k in ["why", "explain", "shap", "feature", "attribution",
+                             "importance", "interpret"]):
+         steps.append("explain")
+         if "predict" not in steps:
+             steps.insert(0, "predict")
+         if "sql" not in steps:
+             steps.insert(0, "sql")
+
+     # Report keywords
+     if any(k in m for k in ["report", "download", "pdf", "summary", "document", "export"]):
+         steps.append("report")
+
+     # Forecast keywords
+     if any(k in m for k in ["forecast", "next", "horizon", "granite", "predict future",
+                             "time series", "timeseries"]):
+         steps.append("forecast")
+         if "sql" not in steps:
+             steps.insert(0, "sql")
+
+     # Default to SQL if no steps identified
+     if not steps:
+         steps = ["sql"]
+
+     rationale = f"Rule-based plan based on keywords: {', '.join(steps)}"
+     logger.info(f"Heuristic plan: {steps}")
+     return {"steps": steps, "rationale": rationale}
+
+
+ def run_agent(
+     message: str,
+     hitl_decision: str = "Approve",
+     reviewer_note: str = ""
+ ) -> Tuple[str, pd.DataFrame]:
+     """
+     Main agent execution function.
+
+     Args:
+         message: User query
+         hitl_decision: Human-in-the-loop decision
+         reviewer_note: Optional review notes
+
+     Returns:
+         Tuple of (response_text, preview_dataframe)
+     """
+     try:
+         # Validate input
+         is_valid, error_msg = validate_message(message)
+         if not is_valid:
+             logger.warning(f"Invalid message: {error_msg}")
+             return f"❌ **Error:** {error_msg}", pd.DataFrame()
+
+         tracer.trace_event("user_message", {"message": message[:500]})  # Limit traced message length
+
+         # Plan actions
          try:
+             plan = plan_actions(message)
+             tracer.trace_event("plan", plan)
          except Exception as e:
+             logger.error(f"Planning failed: {e}")
+             return f"❌ **Planning Error:** Unable to create execution plan. {str(e)}", pd.DataFrame()
+
+         # Initialize result containers
+         sql_df = None
+         predict_df = None
+         explain_imgs = {}
+         artifacts = {}
+         ts_forecast_df = None
+         errors = []
+
+         # Execute SQL step
+         if "sql" in plan["steps"]:
+             try:
+                 sql_df = sql_tool.run(message)
+                 if isinstance(sql_df, pd.DataFrame):
+                     artifacts["sql_rows"] = len(sql_df)
+                     logger.info(f"SQL returned {len(sql_df)} rows")
+                 else:
+                     errors.append("SQL query returned no data")
+             except Exception as e:
+                 error_msg = f"SQL execution failed: {str(e)}"
+                 logger.error(error_msg)
+                 errors.append(error_msg)
+
+         # Execute prediction step
+         if "predict" in plan["steps"]:
+             try:
+                 if sql_df is not None and not sql_df.empty:
+                     predict_df = predict_tool.run(sql_df)
+                     if isinstance(predict_df, pd.DataFrame):
+                         artifacts["predict_rows"] = len(predict_df)
+                         logger.info(f"Predictions generated for {len(predict_df)} rows")
+                 else:
+                     errors.append("Prediction skipped: no data available")
+             except Exception as e:
+                 error_msg = f"Prediction failed: {str(e)}"
+                 logger.error(error_msg)
+                 errors.append(error_msg)
+
+         # Build time series if possible
+         ts_df = None
+         if sql_df is not None and not sql_df.empty:
+             try:
+                 ts_df = build_timeseries(sql_df)
+                 logger.info(f"Time series built with {len(ts_df)} records")
+             except Exception as e:
+                 logger.info(f"Time series preprocessing skipped: {e}")
+                 # Not always an error - data might not be suitable for TS
+
+         # Execute forecast step
+         if "forecast" in plan["steps"]:
+             if ts_df is not None and not ts_df.empty:
+                 try:
+                     # Aggregate portfolio value by timestamp
+                     agg = ts_df.groupby("timestamp", as_index=True)["portfolio_value"].sum().sort_index()
+
+                     if len(agg) < 2:
+                         errors.append("Insufficient time series data for forecasting (need at least 2 points)")
+                     else:
+                         # Validate horizon
+                         horizon = min(DEFAULT_FORECAST_HORIZON, MAX_FORECAST_HORIZON)
+                         ts_forecast_df = ts_tool.zeroshot_forecast(agg, horizon=horizon)
+
+                         if isinstance(ts_forecast_df, pd.DataFrame):
+                             if "error" in ts_forecast_df.columns:
+                                 errors.append(f"Forecast error: {ts_forecast_df['error'].iloc[0]}")
+                                 ts_forecast_df = None
+                             else:
+                                 artifacts["forecast_horizon"] = len(ts_forecast_df)
+                                 logger.info(f"Forecast generated for {len(ts_forecast_df)} periods")
+                 except Exception as e:
+                     error_msg = f"Forecasting failed: {str(e)}"
+                     logger.error(error_msg)
+                     errors.append(error_msg)
+             else:
+                 errors.append("Forecast skipped: no suitable time series data")
+
+         # Execute explanation step
+         if "explain" in plan["steps"]:
+             try:
+                 explain_data = predict_df if predict_df is not None else sql_df
+                 if explain_data is not None and not explain_data.empty:
+                     explain_imgs = explain_tool.run(explain_data)
+                     artifacts["explain_charts"] = len(explain_imgs)
+                     logger.info(f"Generated {len(explain_imgs)} explanation charts")
+                 else:
+                     errors.append("Explanation skipped: no data available")
+             except Exception as e:
+                 error_msg = f"Explanation failed: {str(e)}"
+                 logger.error(error_msg)
+                 errors.append(error_msg)
+
+         # Execute report generation
+         report_link = None
+         if "report" in plan["steps"]:
+             try:
+                 forecast_preview = ts_forecast_df.head(50) if isinstance(ts_forecast_df, pd.DataFrame) else None
+                 report_link = report_tool.render_and_save(
+                     user_query=message,
+                     sql_preview=sql_df.head(50) if isinstance(sql_df, pd.DataFrame) else None,
+                     predict_preview=predict_df.head(50) if isinstance(predict_df, pd.DataFrame) else forecast_preview,
+                     explain_images=explain_imgs,
+                     plan=plan,
+                 )
+                 logger.info(f"Report generated: {report_link}")
+             except Exception as e:
+                 error_msg = f"Report generation failed: {str(e)}"
+                 logger.error(error_msg)
+                 errors.append(error_msg)
+
+         # Log human-in-the-loop decision
+         tracer.trace_event("hitl", {
+             "message": message[:500],
+             "decision": hitl_decision,
+             "reviewer_note": reviewer_note[:500] if reviewer_note else "",
+             "artifacts": artifacts,
+             "plan": plan,
+             "errors": errors,
+         })
+
+         # Compose response
+         response = f"**Plan:** {', '.join(plan['steps'])}\n\n**Rationale:** {plan['rationale']}\n\n"
+
+         # Add artifacts info
+         if artifacts:
+             response += "**Results:**\n"
+             if "sql_rows" in artifacts:
+                 response += f"- SQL query returned {artifacts['sql_rows']} rows\n"
+             if "predict_rows" in artifacts:
+                 response += f"- Generated predictions for {artifacts['predict_rows']} rows\n"
+             if "forecast_horizon" in artifacts:
+                 response += f"- Forecast generated for {artifacts['forecast_horizon']} periods\n"
+             if "explain_charts" in artifacts:
+                 response += f"- Created {artifacts['explain_charts']} explanation charts\n"
+             response += "\n"
+
+         # Add report link
+         if report_link:
+             response += f"📄 **Report:** {report_link}\n\n"
+
+         # Add trace URL
+         if tracer.trace_url:
+             response += f"🔍 **Trace:** {tracer.trace_url}\n\n"
+
+         # Add errors if any
+         if errors:
+             response += "**⚠️ Warnings/Errors:**\n"
+             for err in errors:
+                 response += f"- {err}\n"
+
+         # Determine preview dataframe
+         if isinstance(ts_forecast_df, pd.DataFrame) and not ts_forecast_df.empty:
+             preview_df = ts_forecast_df.head(100)
+         elif isinstance(predict_df, pd.DataFrame) and not predict_df.empty:
+             preview_df = predict_df.head(100)
+         elif isinstance(sql_df, pd.DataFrame) and not sql_df.empty:
+             preview_df = sql_df.head(100)
+         else:
+             preview_df = pd.DataFrame({"message": ["No data to display"]})
+
+         return response, preview_df
+
+     except Exception as e:
+         error_msg = f"Unexpected error in agent execution: {str(e)}"
+         logger.exception(error_msg)
+         tracer.trace_event("error", {"message": error_msg})
+         return f"❌ **Critical Error:** {error_msg}", pd.DataFrame()

+
+ # Gradio Interface
+ with gr.Blocks(title="Tabular Agentic XAI") as demo:
+     gr.Markdown("""
+     # 🤖 Tabular Agentic XAI (Enterprise Edition)
+
+     An intelligent assistant for analyzing tabular data with ML predictions, explanations, and time-series forecasting.
+
+     **Capabilities:**
+     - 📊 SQL queries and data retrieval
+     - 🎯 ML predictions with confidence scores
+     - 🔍 SHAP-based model explanations
+     - 📈 Time-series forecasting with Granite TTM
+     - 📄 Automated report generation
+     """)
+
      with gr.Row():
+         msg = gr.Textbox(
+             label="Ask your question",
+             placeholder="e.g., Show me the top 10 customers by revenue, predict churn risk, forecast next quarter...",
+             lines=3
+         )
+
      with gr.Row():
+         hitl = gr.Radio(
+             ["Approve", "Needs Changes"],
+             value="Approve",
+             label="Human Review",
+             info="Review the planned actions before execution"
+         )
+         note = gr.Textbox(
+             label="Reviewer note (optional)",
+             placeholder="Add any review comments...",
+             lines=2
+         )
+
+     out_md = gr.Markdown(label="Response")
+     out_df = gr.Dataframe(
+         interactive=False,
+         label="Data Preview (max 100 rows)",
+         wrap=True
+     )
+
+     with gr.Row():
+         ask = gr.Button("🚀 Run Analysis", variant="primary")
+         clear = gr.Button("🔄 Clear")
+
+     ask.click(
+         run_agent,
+         inputs=[msg, hitl, note],
+         outputs=[out_md, out_df]
+     )
+
+     clear.click(
+         lambda: ("", "Approve", "", "", pd.DataFrame()),
+         outputs=[msg, hitl, note, out_md, out_df]
+     )
+
+     gr.Markdown("""
+     ---
+     **Tips:**
+     - Be specific in your queries for better results
+     - Use natural language - the system will interpret your intent
+     - Review the execution plan before approving
+     - Check the trace link for detailed execution logs
+     """)
+

  if __name__ == "__main__":
+     logger.info("Starting Gradio application...")
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         show_error=True
+     )
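
For reference, a minimal sketch of how the fallback planner added in this commit is expected to behave when ORCHESTRATOR_MODEL is unset (so llm is None). The query text and the printed values below are illustrative, not part of the commit:

# Illustrative only: exercises the keyword heuristics in plan_actions,
# assuming the LLM planner is disabled (ORCHESTRATOR_MODEL not set).
plan = plan_actions("Explain why these customers are high risk and export a PDF report")
# "risk" triggers predict (which inserts "sql" first, since predictions need data),
# "why"/"explain" trigger explain, and "pdf"/"report"/"export" trigger report.
print(plan["steps"])      # ['sql', 'predict', 'explain', 'report']
print(plan["rationale"])  # Rule-based plan based on keywords: sql, predict, explain, report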