mokrane25 committed on
Commit
6bd3a6a
·
verified ·
1 Parent(s): 5bbdaee

Update app.py

Files changed (1)
  1. app.py +209 -174
app.py CHANGED
@@ -1,230 +1,265 @@
 import gradio as gr
 import os
-from llama_index.core import Settings
-from llama_index.llms.anthropic import Anthropic
-from llama_index.core.llms import ChatMessage, MessageRole
 import traceback
 import asyncio
-from dotenv import load_dotenv  # Make sure to use this
-import uuid
-from models.task_prompt import TaskPrompt  # Import from the models directory

 # Load environment variables from .env file
 load_dotenv()

-# Import your ManagerAgent and related classes
-from manager_agent import ManagerAgent  # Using the existing manager_agent.py file
-
 # --- Configuration ---
-LLM_MODEL = "claude-sonnet-4-20250514"  # Claude Sonnet 4 model

-# --- Global variables for update handling ---
-status_update_queue = asyncio.Queue()
 current_status = "Ready"

 # --- LlamaIndex LLM Initialization ---
-def initialize_llm_and_agent():
-    """Initialize LLM and Manager Agent with proper error handling"""
-
-    # Check if API key is available
     api_key = os.environ.get("ANTHROPIC_API_KEY")
     if not api_key:
         print("\n" + "="*60)
         print("⚠️ ERROR: ANTHROPIC_API_KEY not found in environment variables!")
-        print("Please set your API key using one of these methods:")
-        print("1. Create a .env file with: ANTHROPIC_API_KEY=your-api-key-here")
-        print("2. Set as environment variable: export ANTHROPIC_API_KEY='your-api-key-here'")
         print("="*60 + "\n")
-        return None, None
-
     try:
-        # Initialize LLM with lower temperature for more deterministic responses
-        llm = Anthropic(
             model=LLM_MODEL,
-            temperature=0.2,  # Lower temperature for more focused responses
             max_tokens=4096
         )
-        Settings.llm = llm
         print(f"Successfully initialized LlamaIndex with Anthropic model: {LLM_MODEL} (temperature=0.2)")
-
-        # Initialize the ManagerAgent with update callback
-        agent = ManagerAgent(
-            llm,
-            max_iterations=30,  # Increase max_iterations to avoid "Reached max iterations" error
-            update_callback=update_status  # Pass the update callback function
         )
         print("✅ ManagerAgent initialized successfully")
-
-        return llm, agent
-
     except Exception as e:
         print(f"Error initializing Anthropic LLM or ManagerAgent: {e}")
         traceback.print_exc()
-        return None, None
-# --- Update callback function ---
-def update_status(message):
-    """Callback function for the ManagerAgent to update status"""
     global current_status
     current_status = message
-
-    # Add to queue for the status monitor
-    try:
-        status_update_queue.put_nowait(message)
-    except Exception as e:
-        print(f"Error adding status update to queue: {e}")
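The removed `update_status`/`status_monitor` pair was a single-producer/single-consumer handoff: the agent pushes with the non-blocking `put_nowait`, and an async consumer drains the queue with a timeout so it can keep yielding to the UI. A minimal, runnable sketch of that pattern in isolation (the message strings are illustrative only):

```python
import asyncio

status_queue = asyncio.Queue()

def producer(msg):
    # Non-blocking push; raises QueueFull only if a maxsize were set
    status_queue.put_nowait(msg)

async def consumer():
    while True:
        try:
            # Wake up at least twice a second even when no messages arrive
            msg = await asyncio.wait_for(status_queue.get(), timeout=0.5)
            print("status:", msg)
            if msg == "stop":
                return
        except asyncio.TimeoutError:
            continue

async def main():
    for m in ("working", "almost done", "stop"):
        producer(m)
    await consumer()

asyncio.run(main())
```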
 
-# --- Status monitor function ---
-async def status_monitor():
-    """Monitor for status updates and update the Gradio component"""
     global current_status
-
-    try:
-        # Set initial status
-        yield current_status
-
-        # Monitor for updates
-        while True:
-            try:
-                # Check for new updates with a timeout
-                message = await asyncio.wait_for(status_update_queue.get(), timeout=0.5)
-                yield message
-            except asyncio.TimeoutError:
-                # No updates, continue waiting
-                await asyncio.sleep(0.1)
-            except Exception as e:
-                print(f"Error in status monitor: {e}")
-                await asyncio.sleep(1)
-    except Exception as e:
-        print(f"Status monitor error: {e}")
-        yield f"Status monitor error: {str(e)}"
-
-# --- Gradio Chat Function ---
-async def respond(message, history):
-    """Response function for the Gradio ChatInterface, backed by the ManagerAgent."""
-
-    # Check whether the ManagerAgent is initialized
-    if manager_agent is None or Settings.llm is None:
-        yield "❌ ManagerAgent not initialized. Please check your ANTHROPIC_API_KEY environment variable and ensure all components are properly loaded."
-        return
-
-    try:
-        print(f"\n🤖 ManagerAgent: Processing user message: '{message[:100]}{'...' if len(message) > 100 else ''}'")
-
-        # Update status
-        update_status(f"Processing your request: '{message[:50]}{'...' if len(message) > 50 else ''}'")
-
-        # Create a TaskPrompt from the user message.
-        # The ManagerAgent will analyze the prompt and decide on the appropriate workflow.
-        task_prompt = TaskPrompt(text=message)
-
-        print("🔄 ManagerAgent: Starting task execution workflow...")
-
-        # Get the complete response
-        response = manager_agent.run_task(task_prompt)
-
-        # Simulate progressive streaming of the response for a better UX
-        words = response.split()
-        partial_response = ""
-
-        for i, word in enumerate(words):
-            partial_response += word + " "
-
-            # Yield the partial response every few words for a streaming effect
-            if i % 3 == 0 or i == len(words) - 1:  # Every 3 words, or on the last word
-                yield partial_response.strip()
-                # Add a small delay for realistic streaming
-                await asyncio.sleep(0.01)
-
-        print("✅ ManagerAgent: Task completed successfully")
-        update_status("Ready for your next request")
-
-    except Exception as e:
-        error_message = f"❌ ManagerAgent Error: {str(e)}"
-        print(f"\n🚨 ManagerAgent Error: {e}")
-        traceback.print_exc()
-        update_status(f"Error: {str(e)}")
-        yield error_message
 # --- Gradio Interface Setup ---
-def create_interface():
-    """Create the Gradio interface."""
-
-    # Check the API key
     if "ANTHROPIC_API_KEY" not in os.environ:
-        print("\n" + "="*60)
-        print("⚠️ WARNING: ANTHROPIC_API_KEY not found in environment variables!")
-        print("Please set your API key:")
-        print("export ANTHROPIC_API_KEY='your-api-key-here'")
-        print("="*60 + "\n")
-
-    # Create the chat interface
-    interface = gr.ChatInterface(
-        fn=respond,
-        chatbot=gr.Chatbot(
             height=500,
             show_label=False,
-            container=True,
-            type="messages"
-        ),
-        textbox=gr.Textbox(
-            placeholder="Type your message here...",
-            container=False,
-            scale=7
-        ),
-        title="ALITA",
-        description="ALITA is a self-learning AI agent that can search for information, analyze data, create tools, and orchestrate complex tasks.",
-        examples=[
-            "🔍 Search for information on artificial intelligence",
-            "📊 Analyze technology market trends",
-            "⚡ Create a script to automate a repetitive task",
-            "🌐 Find open source resources for machine learning",
-        ],
-        theme="soft"
-    )
-
-    # Add the status box
-    with interface:
         with gr.Row():
-            with gr.Column():
-                status_box = gr.Textbox(
-                    label="Agent Status",
-                    value="Ready",
-                    interactive=False
                 )
-
-    # Use the Interface for status monitoring
-    gr.Interface(
-        fn=status_monitor,
-        inputs=None,
-        outputs=status_box,
-        live=True,
-        show_progress=False
-    )
-
-    return interface

-# Initialize the components
-llm, manager_agent = initialize_llm_and_agent()

 # --- Launch the Application ---
 if __name__ == "__main__":
     print("🚀 Starting Gradio ALITA Chat Application...")
-
-    # Create and launch the interface
-    app = create_interface()
-
     try:
-        app.launch(
             share=False,
             server_name="127.0.0.1",
-            server_port=7825,
-            show_error=True
         )
     except KeyboardInterrupt:
         print("\n👋 Application stopped by user")
     except Exception as e:
         print(f"\n❌ Error launching application: {e}")
         traceback.print_exc()
-
     print("✅ Gradio application stopped.")
+# app.py
+
 import gradio as gr
 import os
 import traceback
 import asyncio
+from dotenv import load_dotenv
+from models.task_prompt import TaskPrompt
+import time
+from llama_index.core import Settings as LlamaSettings  # Import at top level
+from llama_index.llms.anthropic import Anthropic  # Import at top level
+from manager_agent import ManagerAgent  # Ensure this path is correct
+import concurrent.futures  # For running blocking code in a separate thread

 # Load environment variables from .env file
 load_dotenv()

 # --- Configuration ---
+LLM_MODEL = "claude-sonnet-4-20250514"

+# --- Global variables ---
 current_status = "Ready"
+llm_global = None
+manager_agent_global = None
+# Settings_global is not strictly needed as a global if LlamaSettings is imported directly
+
+# Thread pool executor for running blocking agent tasks
+thread_pool_executor = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count() or 1)
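The thread pool declared above is what later lets the async Gradio handler hand the blocking `ManagerAgent.run_task` call off to a worker thread instead of stalling the event loop. A minimal, self-contained sketch of the `run_in_executor` pattern used below (`blocking_task` is a hypothetical stand-in, not part of this app):

```python
import asyncio
import concurrent.futures
import time

pool = concurrent.futures.ThreadPoolExecutor(max_workers=2)

def blocking_task(seconds):
    # Stand-in for a blocking call such as manager_agent_global.run_task
    time.sleep(seconds)  # blocks a pool thread, not the event loop
    return f"done after {seconds}s"

async def main():
    loop = asyncio.get_event_loop()
    # The event loop stays free to serve other coroutines while the
    # blocking call runs on a worker thread; await resumes on completion.
    result = await loop.run_in_executor(pool, blocking_task, 1)
    print(result)

asyncio.run(main())
```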

 # --- LlamaIndex LLM Initialization ---
+def initialize_components():
+    global llm_global, manager_agent_global
+
     api_key = os.environ.get("ANTHROPIC_API_KEY")
     if not api_key:
         print("\n" + "="*60)
         print("⚠️ ERROR: ANTHROPIC_API_KEY not found in environment variables!")
+        print("Please set your API key (e.g., in a .env file).")
         print("="*60 + "\n")
+        return
+
     try:
+        llm_global = Anthropic(
             model=LLM_MODEL,
+            temperature=0.2,
             max_tokens=4096
         )
+        LlamaSettings.llm = llm_global  # Use the imported LlamaSettings directly
         print(f"Successfully initialized LlamaIndex with Anthropic model: {LLM_MODEL} (temperature=0.2)")
+
+        manager_agent_global = ManagerAgent(
+            llm_global,
+            max_iterations=30,  # Keep this reasonable for testing
+            update_callback=update_status_callback
         )
         print("✅ ManagerAgent initialized successfully")
     except Exception as e:
         print(f"Error initializing Anthropic LLM or ManagerAgent: {e}")
         traceback.print_exc()
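Assigning `LlamaSettings.llm` once makes the model the process-wide default for LlamaIndex components. A minimal sketch of what that buys, assuming llama-index >= 0.10 and `ANTHROPIC_API_KEY` set in the environment:

```python
from llama_index.core import Settings
from llama_index.llms.anthropic import Anthropic

# Any LlamaIndex component created afterwards that is not given an
# explicit llm= argument falls back to this default.
Settings.llm = Anthropic(model="claude-sonnet-4-20250514", temperature=0.2)

print(Settings.llm.complete("Reply with one word: ready?").text)
```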
 
+# --- Update callback function (called by ManagerAgent) ---
+def update_status_callback(message):
     global current_status
+    # This function is called (potentially) from the ManagerAgent's thread
+    # or from the ReAct agent's execution context. It only needs to update
+    # the global variable, which the Gradio polling loop will pick up.
     current_status = message
+    print(f"✅ UI_STATUS_UPDATE (via callback): {message}")  # Differentiate the console log

+# --- Status retrieval function for Gradio polling ---
+def get_current_status_for_ui():
     global current_status
+    timestamp = time.time()
+    return f"{current_status}<span style='display:none;'>{timestamp}</span>"
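Gradio skips updating an output whose new value equals the old one, so a repeated status message ("Ready" followed by "Ready" again) would never repaint; the hidden `<span>` carrying a fresh timestamp makes every polled value unique. A stripped-down, runnable sketch of this polling pattern, wired the same way as `continuous_status_updater` below (names like `status_stream` are illustrative):

```python
import asyncio
import time
import gradio as gr

status = {"msg": "Ready"}  # stands in for the module-level current_status

def poll_status():
    # Unique suffix on every call, so Gradio always sees a "changed" value
    return f"{status['msg']}<span style='display:none;'>{time.time()}</span>"

async def status_stream(interval=0.3):
    while True:
        yield poll_status()
        await asyncio.sleep(interval)

with gr.Blocks() as demo:
    box = gr.Textbox(label="Agent Status", interactive=False)
    # A generator attached to load() streams a new value on every yield
    demo.load(status_stream, inputs=None, outputs=box)

# demo.launch()
```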
 # --- Gradio Interface Setup ---
+def create_gradio_interface():
     if "ANTHROPIC_API_KEY" not in os.environ:
+        gr.Warning("ANTHROPIC_API_KEY not found in environment variables! ALITA may not function correctly.")
+
+    with gr.Blocks(theme="soft") as demo:
+        gr.Markdown("# ALITA")
+        gr.Markdown("ALITA is a self-learning AI agent that can search for information, analyze data, create tools, and orchestrate complex tasks.")
+
+        chatbot_component = gr.Chatbot(
+            label="Chat",
             height=500,
             show_label=False,
+            # type='messages'  # For Gradio 4.x+
+        )
+        gr.Markdown("Gradio version: " + gr.__version__ + " (the Chatbot defaults to the 'tuples' history format on older versions; consider `type='messages'` on newer Gradio if chat display issues persist).")
         with gr.Row():
+            message_textbox = gr.Textbox(
+                placeholder="Type your message here...",
+                scale=7,
+                show_label=False,
+                container=False
+            )
+
+        gr.Examples(
+            examples=[
+                "🔍 Search for information on artificial intelligence",
+                "📊 Analyze technology market trends",
+                "⚡ Create a script to automate a repetitive task",
+                "🌐 Find open source resources for machine learning",
+            ],
+            inputs=message_textbox,
+        )
+
+        status_box_component = gr.Textbox(
+            label="Agent Status",
+            value=get_current_status_for_ui(),
+            interactive=False,
+            # elem_id="status_box_alita"  # For potential direct JS manipulation if desperate (avoid)
+        )
+
+        def add_user_msg(user_input_text, chat_history_list):
+            if not user_input_text.strip():
+                return gr.update(), chat_history_list
+            # For older Gradio, history is a list of (user_msg, bot_msg) tuples
+            chat_history_list.append((user_input_text, None))
+            return gr.update(value=""), chat_history_list
+
+        async def generate_bot_reply(chat_history_list):
+            if not chat_history_list or chat_history_list[-1][0] is None:
+                # This case should ideally not be reached if add_user_msg works correctly
+                yield chat_history_list
+                return
+
+            user_message = chat_history_list[-1][0]
+
+            if manager_agent_global is None or LlamaSettings.llm is None:
+                # This update_status_callback sets current_status; the polling
+                # mechanism (continuous_status_updater) should pick it up.
+                update_status_callback("⚠️ Error: Agent or LLM not initialized. Check API key and logs.")
+                # For older Gradio, update the last tuple's second element
+                chat_history_list[-1] = (chat_history_list[-1][0], "❌ Critical Error: ALITA is not properly initialized. Please check server logs and API key.")
+                yield chat_history_list
+                return
+
+            try:
+                print(f"\n🤖 GRADIOLOG: Processing user message: '{user_message[:100]}{'...' if len(user_message) > 100 else ''}'")
+                update_status_callback(f"💬 Processing: '{user_message[:50]}{'...' if len(user_message) > 50 else ''}'")
+                await asyncio.sleep(0.01)  # Allow the UI to briefly show "Processing..."
+
+                task_prompt = TaskPrompt(text=user_message)
+
+                update_status_callback("🔄 Analyzing request and determining optimal workflow...")
+                await asyncio.sleep(0.01)  # Allow the UI to briefly update
+
+                # Run the blocking manager_agent_global.run_task in a separate thread
+                loop = asyncio.get_event_loop()
+                response_text_from_agent = await loop.run_in_executor(
+                    thread_pool_executor,
+                    manager_agent_global.run_task,  # The function to run
+                    task_prompt  # Argument to the function
                )
+                # By this point, run_task has completed, and all its internal
+                # calls to update_status_callback (via send_update) should have occurred.
+                # The polling mechanism should have picked up these changes.
+
+                update_status_callback("✨ Generating final response stream...")
+                await asyncio.sleep(0.01)
+                final_bot_response = response_text_from_agent
+
+                words = final_bot_response.split()
+                accumulated_response_stream = ""
+                total_words = len(words)

+                # Initialize the bot's part of the message in history for older Gradio
+                current_user_message = chat_history_list[-1][0]
+                chat_history_list[-1] = (current_user_message, "")
+
+                if not words:
+                    chat_history_list[-1] = (current_user_message, final_bot_response.strip())
+                    yield chat_history_list
+                else:
+                    for i, word in enumerate(words):
+                        accumulated_response_stream += word + " "
+                        # These status updates are for the streaming part only;
+                        # the agent's internal updates should already have happened.
+                        if total_words > 0:  # Avoid division by zero
+                            if i == total_words // 4: update_status_callback("🔄 Streaming response (25%)...")
+                            elif i == total_words // 2: update_status_callback("🔄 Streaming response (50%)...")
+                            elif i == (total_words * 3) // 4: update_status_callback("🔄 Streaming response (75%)...")

+                        if i % 3 == 0 or i == len(words) - 1:
+                            chat_history_list[-1] = (current_user_message, accumulated_response_stream.strip())
+                            yield chat_history_list
+                            await asyncio.sleep(0.01)  # For the streaming effect
+
+                # Ensure the final complete response is set
+                if chat_history_list[-1][1] != final_bot_response.strip():
+                    chat_history_list[-1] = (current_user_message, final_bot_response.strip())
+                    yield chat_history_list
+
+                print("✅ GRADIOLOG: Task processing and streaming completed.")
+                update_status_callback("✅ Ready for your next request")
+
+            except Exception as e:
+                error_message_for_ui = f"❌ Gradio/Agent Error: {str(e)}"
+                print(f"\n🚨 GRADIOLOG: Error in generate_bot_reply: {e}")
+                traceback.print_exc()
+                update_status_callback(f"❌ Error: {str(e)[:100]}...")
+                chat_history_list[-1] = (chat_history_list[-1][0], error_message_for_ui)
+                yield chat_history_list
+
213
+ message_textbox.submit(
214
+ add_user_msg,
215
+ inputs=[message_textbox, chatbot_component],
216
+ outputs=[message_textbox, chatbot_component],
217
+ show_progress="hidden", # Gradio 3.x might not have this, can be ignored
218
+ ).then(
219
+ generate_bot_reply,
220
+ inputs=[chatbot_component],
221
+ outputs=[chatbot_component],
222
+ api_name=False, # Good practice
223
+ # show_progress="hidden", # Gradio 3.x might not have this
224
+ )
225
+
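The `.submit(...).then(...)` chain is the usual Blocks recipe for chat without `gr.ChatInterface`: the first handler appends the user turn and clears the textbox, and the second is an async generator whose every `yield` repaints the Chatbot. A self-contained sketch of the same pattern, with an echo reply standing in for the agent:

```python
import asyncio
import gradio as gr

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    box = gr.Textbox(show_label=False)

    def add_user(text, history):
        history = history + [(text, None)]  # tuples format: (user, bot)
        return "", history                  # clear the textbox

    async def bot_reply(history):
        user_msg = history[-1][0]
        reply = f"Echo: {user_msg}"         # stand-in for the agent's answer
        acc = ""
        for word in reply.split():
            acc += word + " "
            history[-1] = (user_msg, acc.strip())
            yield history                   # each yield repaints the Chatbot
            await asyncio.sleep(0.05)

    box.submit(add_user, [box, chat], [box, chat]).then(bot_reply, chat, chat)

# demo.launch()
```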
+        async def continuous_status_updater(update_interval_seconds=0.3):  # Slightly faster poll
+            """Continuously yield status updates for the status_box_component."""
+            print("GRADIOLOG: Starting continuous_status_updater loop.")
+            while True:
+                # print(f"POLL: Fetching status: {current_status}")  # DEBUG: very verbose
+                yield get_current_status_for_ui()
+                await asyncio.sleep(update_interval_seconds)
+
+        demo.load(continuous_status_updater, inputs=None, outputs=status_box_component)
+        print("GRADIOLOG: Continuous status updater loaded.")
+    return demo
+
+# Initialize LLM and Agent components
+initialize_components()

 # --- Launch the Application ---
 if __name__ == "__main__":
+    print(f"Gradio version: {gr.__version__}")
     print("🚀 Starting Gradio ALITA Chat Application...")
+    alita_interface = create_gradio_interface()
     try:
+        alita_interface.launch(
             share=False,
             server_name="127.0.0.1",
+            server_port=6126,
+            show_error=True,
+            # debug=True  # Can be helpful
         )
     except KeyboardInterrupt:
         print("\n👋 Application stopped by user")
     except Exception as e:
         print(f"\n❌ Error launching application: {e}")
         traceback.print_exc()
+    finally:
+        print("Shutting down thread pool executor...")
+        thread_pool_executor.shutdown(wait=True)  # Clean up threads

     print("✅ Gradio application stopped.")