mokrane25 committed
Commit fede4d9 · verified · 1 Parent(s): 1cef225

Update app.py

Files changed (1)
  1. app.py +178 -204
app.py CHANGED
@@ -1,256 +1,230 @@
-# app.py
-
 import gradio as gr
 import os
 import traceback
 import asyncio
-from dotenv import load_dotenv
-from models.task_prompt import TaskPrompt # Ensure this path is correct
-import time
-from llama_index.core import Settings as LlamaSettings
-from llama_index.llms.anthropic import Anthropic
-from manager_agent import ManagerAgent # Ensure this path is correct
-import concurrent.futures
 
 # Load environment variables from .env file
 load_dotenv()
 
 # --- Configuration ---
-LLM_MODEL = "claude-sonnet-4-20250514" # Ensure this model is available to your Anthropic key
 
-# --- Global variables ---
 current_status = "Ready"
-llm_global = None
-manager_agent_global = None
-
-# Thread pool executor for running blocking agent tasks
-# Use a smaller number of workers on resource-constrained environments like HF Spaces free tier
-MAX_WORKERS = min(4, (os.cpu_count() or 1) + 4) # Based on concurrent.futures recommendation
-thread_pool_executor = concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)
-
 
 # --- LlamaIndex LLM Initialization ---
-def initialize_components():
-    global llm_global, manager_agent_global
-
     api_key = os.environ.get("ANTHROPIC_API_KEY")
     if not api_key:
         print("\n" + "="*60)
         print("⚠️ ERROR: ANTHROPIC_API_KEY not found in environment variables!")
-        print("Please set your ANTHROPIC_API_KEY as a Secret in your Hugging Face Space.")
         print("="*60 + "\n")
-        # The app will still load, but the agent won't work.
-        # UI should ideally show a persistent error if this happens.
-        return
-
     try:
-        llm_global = Anthropic(
             model=LLM_MODEL,
-            temperature=0.2,
-            max_tokens=4096 # Consider if this is too high for some models/responses
         )
-        LlamaSettings.llm = llm_global
-        print(f"Successfully initialized LlamaIndex with Anthropic model: {LLM_MODEL}")
-
-        manager_agent_global = ManagerAgent(
-            llm_global,
-            max_iterations=15, # Reduced for HF Spaces to avoid timeouts on complex tasks
-            update_callback=update_status_callback
         )
         print("✅ ManagerAgent initialized successfully")
-
     except Exception as e:
         print(f"Error initializing Anthropic LLM or ManagerAgent: {e}")
         traceback.print_exc()
-        # llm_global or manager_agent_global might be None, handled in generate_bot_reply
 
-# --- Update callback function (called by ManagerAgent) ---
-def update_status_callback(message):
     global current_status
     current_status = message
-    print(f"✅ UI_STATUS_UPDATE (via callback): {message}")
 
-# --- Status retrieval function for Gradio polling ---
-def get_current_status_for_ui():
     global current_status
-    timestamp = time.time()
-    return f"{current_status}<span style='display:none;'>{timestamp}</span>"
 
 # --- Gradio Interface Setup ---
-def create_gradio_interface():
-    # Check for API key early for user feedback if possible, though init happens later
     if "ANTHROPIC_API_KEY" not in os.environ:
-        # This warning will appear at build time if .env is not used on Spaces
-        # For runtime, secrets are preferred.
-        print("Warning: ANTHROPIC_API_KEY not found in os.environ during UI creation.")
-
-    with gr.Blocks(theme="soft") as demo:
-        gr.Markdown("# ALITA")
-        gr.Markdown(f"ALITA is a self-learning AI agent. (Gradio Version: {gr.__version__})")
-
-        chatbot_component = gr.Chatbot(
-            label="Chat",
             height=500,
             show_label=False,
-            type='tuples' # Explicitly set for Gradio 3.x to align with history format
-        )
-
         with gr.Row():
-            message_textbox = gr.Textbox(
-                placeholder="Tapez votre message ici...",
-                scale=7,
-                show_label=False,
-                container=False
-            )
-
-        gr.Examples(
-            examples=[
-                "Hello",
-                "What is the temperature in Paris now?",
-                "🔍 Recherche des informations sur l'intelligence artificielle",
-                "📊 Analyse les tendances du marché technologique",
-            ],
-            inputs=message_textbox,
-        )
-
-        status_box_component = gr.Textbox(
-            label="Agent Status",
-            value=get_current_status_for_ui(), # Initial value
-            interactive=False,
-        )
-
-        def add_user_msg(user_input_text, chat_history_list):
-            if chat_history_list is None: # Robustness for first message
-                chat_history_list = []
-            if not user_input_text.strip():
-                return gr.update(value=user_input_text), chat_history_list # Return original textbox value if empty
-            chat_history_list.append((user_input_text, None)) # Using tuple format
-            return gr.update(value=""), chat_history_list
-
-        async def generate_bot_reply(chat_history_list):
-            if chat_history_list is None or not chat_history_list or chat_history_list[-1][0] is None:
-                yield [] # Return empty history or handle as error
-                return
-
-            user_message = chat_history_list[-1][0]
-
-            if manager_agent_global is None or LlamaSettings.llm is None:
-                update_status_callback("⚠️ Error: Agent or LLM not initialized. Check API key and logs.")
-                # Ensure history is not None before trying to update
-                current_user_msg_tuple = chat_history_list[-1] if chat_history_list else (user_message, None)
-                chat_history_list[-1] = (current_user_msg_tuple[0], "❌ Critical Error: ALITA is not properly initialized. Please check server logs and API key.")
-                yield chat_history_list
-                return
-
-            try:
-                print(f"\n🤖 GRADIOLOG: Processing user message: '{user_message[:100]}'")
-                update_status_callback(f"💬 Processing: '{user_message[:50]}{'...' if len(user_message) > 50 else ''}'")
-                await asyncio.sleep(0.01)
-
-                task_prompt = TaskPrompt(text=user_message)
-
-                update_status_callback("🔄 Analyzing request...")
-                await asyncio.sleep(0.01)
-
-                loop = asyncio.get_event_loop()
-                response_text_from_agent = await loop.run_in_executor(
-                    thread_pool_executor,
-                    manager_agent_global.run_task,
-                    task_prompt
                 )
-
-                update_status_callback("✨ Generating final response stream...")
-                await asyncio.sleep(0.01)
-                final_bot_response = response_text_from_agent
-
-                words = final_bot_response.split()
-                accumulated_response_stream = ""
 
-                current_user_msg_tuple = chat_history_list[-1]
-                chat_history_list[-1] = (current_user_msg_tuple[0], "") # Initialize bot response in tuple
-
-                if not words:
-                    chat_history_list[-1] = (current_user_msg_tuple[0], final_bot_response.strip())
-                    yield chat_history_list
-                else:
-                    for i, word in enumerate(words):
-                        accumulated_response_stream += word + " "
-                        if len(words) > 0:
-                            if i == len(words) // 4: update_status_callback("🔄 Streaming response (25%)...")
-                            elif i == len(words) // 2: update_status_callback("🔄 Streaming response (50%)...")
-                            elif i == (len(words) * 3) // 4: update_status_callback("🔄 Streaming response (75%)...")
-
-                        if i % 3 == 0 or i == len(words) - 1:
-                            chat_history_list[-1] = (current_user_msg_tuple[0], accumulated_response_stream.strip())
-                            yield chat_history_list
-                            await asyncio.sleep(0.01)
-
-                if chat_history_list[-1][1] != final_bot_response.strip():
-                    chat_history_list[-1] = (current_user_msg_tuple[0], final_bot_response.strip())
-                    yield chat_history_list
-
-                print("✅ GRADIOLOG: Task processing and streaming completed.")
-                update_status_callback("✅ Ready for your next request")
-
-            except Exception as e:
-                error_message_for_ui = f"❌ Gradio/Agent Error: {str(e)}"
-                print(f"\n🚨 GRADIOLOG: Error in generate_bot_reply: {e}")
-                traceback.print_exc()
-                update_status_callback(f"❌ Error: {str(e)[:100]}...")
-                current_user_msg_tuple = chat_history_list[-1] if chat_history_list else (user_message, None)
-                chat_history_list[-1] = (current_user_msg_tuple[0], error_message_for_ui)
-                yield chat_history_list
-
-        message_textbox.submit(
-            add_user_msg,
-            inputs=[message_textbox, chatbot_component],
-            outputs=[message_textbox, chatbot_component],
-        ).then(
-            generate_bot_reply,
-            inputs=[chatbot_component],
-            outputs=[chatbot_component],
-            api_name=False,
-        )
-
-        async def continuous_status_updater(update_interval_seconds=0.3):
-            print("GRADIOLOG: Starting continuous_status_updater loop.")
-            while True:
-                yield get_current_status_for_ui()
-                await asyncio.sleep(update_interval_seconds)
-
-        # demo.load is the correct way to start a background task on Blocks load
-        demo.load(continuous_status_updater, inputs=None, outputs=status_box_component)
-        print("GRADIOLOG: Continuous status updater loaded.")
-    return demo
 
-# Initialize LLM and Agent components
-# This should be called AFTER defining functions it might use if there are complex dependencies,
-# but for global setup, it's fine here.
-initialize_components()
 
 # --- Launch the Application ---
 if __name__ == "__main__":
-    print(f"Gradio version being used: {gr.__version__}")
-
     print("🚀 Starting Gradio ALITA Chat Application...")
-    alita_interface = create_gradio_interface()
-
     try:
-        alita_interface.launch(
-            # share=False, # Share=True can sometimes trigger /api_info more aggressively
-            server_name="0.0.0.0", # For HF Spaces, 0.0.0.0 is often needed
-            server_port=int(os.environ.get('PORT', 7860)), # HF Spaces sets PORT env var
-            show_error=True,
         )
     except KeyboardInterrupt:
         print("\n👋 Application stopped by user")
     except Exception as e:
         print(f"\n❌ Error launching application: {e}")
         traceback.print_exc()
-    finally:
-        print("Shutting down thread pool executor...")
-        if thread_pool_executor: # Check if it was initialized
-            thread_pool_executor.shutdown(wait=False) # HF Spaces might kill it anyway
-
     print("✅ Gradio application stopped.")
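The removed handler keeps the asyncio event loop responsive by pushing the blocking `manager_agent_global.run_task` call onto a thread pool with `loop.run_in_executor`. A minimal, self-contained sketch of that pattern, with a hypothetical `slow_agent_call` standing in for the real agent:

```python
import asyncio
import concurrent.futures
import time

executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)

def slow_agent_call(prompt: str) -> str:
    # Stand-in for a blocking call such as ManagerAgent.run_task.
    time.sleep(2)
    return f"result for: {prompt}"

async def handler(prompt: str) -> str:
    loop = asyncio.get_running_loop()
    # The blocking work runs on a worker thread; the event loop stays free
    # to serve other requests and UI updates in the meantime.
    return await loop.run_in_executor(executor, slow_agent_call, prompt)

print(asyncio.run(handler("hello")))
```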
 
 
 
 import gradio as gr
 import os
+from llama_index.core import Settings
+from llama_index.llms.anthropic import Anthropic
+from llama_index.core.llms import ChatMessage, MessageRole
 import traceback
 import asyncio
+from dotenv import load_dotenv # Make sure to use this
+import uuid
+from models.task_prompt import TaskPrompt # Import from models directory
 
 # Load environment variables from .env file
 load_dotenv()
 
+# Import your ManagerAgent and related classes
+from manager_agent2 import ManagerAgent # Using the existing manager_agent.py file
+
 # --- Configuration ---
+LLM_MODEL = "claude-sonnet-4-20250514" # Claude Sonnet 4 model
 
+# --- Global variables for update handling ---
+status_update_queue = asyncio.Queue()
 current_status = "Ready"
 
 # --- LlamaIndex LLM Initialization ---
+def initialize_llm_and_agent():
+    """Initialize LLM and Manager Agent with proper error handling"""
+
+    # Check if API key is available
     api_key = os.environ.get("ANTHROPIC_API_KEY")
     if not api_key:
         print("\n" + "="*60)
         print("⚠️ ERROR: ANTHROPIC_API_KEY not found in environment variables!")
+        print("Please set your API key using one of these methods:")
+        print("1. Create a .env file with: ANTHROPIC_API_KEY=your-api-key-here")
+        print("2. Set as environment variable: export ANTHROPIC_API_KEY='your-api-key-here'")
         print("="*60 + "\n")
+        return None, None
+
     try:
+        # Initialize LLM with lower temperature for more deterministic responses
+        llm = Anthropic(
             model=LLM_MODEL,
+            temperature=0.2, # Lower temperature for more focused responses
+            max_tokens=4096
         )
+        Settings.llm = llm
+        print(f"Successfully initialized LlamaIndex with Anthropic model: {LLM_MODEL} (temperature=0.2)")
+
+        # Initialize the ManagerAgent with update callback
+        agent = ManagerAgent(
+            llm,
+            max_iterations=30, # Increase max_iterations to avoid "Reached max iterations" error
+            update_callback=update_status # Pass the update callback function
         )
         print("✅ ManagerAgent initialized successfully")
+
+        return llm, agent
+
     except Exception as e:
         print(f"Error initializing Anthropic LLM or ManagerAgent: {e}")
         traceback.print_exc()
+        return None, None
 
+# --- Update callback function ---
+def update_status(message):
+    """Callback function for the ManagerAgent to update status"""
     global current_status
     current_status = message
+
+    # Add to queue for the status monitor
+    try:
+        status_update_queue.put_nowait(message)
+    except Exception as e:
+        print(f"Error adding status update to queue: {e}")
 
+# --- Status monitor function ---
+async def status_monitor():
+    """Monitor for status updates and update the Gradio component"""
     global current_status
+
+    try:
+        # Set initial status
+        yield current_status
+
+        # Monitor for updates
+        while True:
+            try:
+                # Check for new updates with a timeout
+                message = await asyncio.wait_for(status_update_queue.get(), timeout=0.5)
+                yield message
+            except asyncio.TimeoutError:
+                # No updates, continue waiting
+                await asyncio.sleep(0.1)
+            except Exception as e:
+                print(f"Error in status monitor: {e}")
+                await asyncio.sleep(1)
+    except Exception as e:
+        print(f"Status monitor error: {e}")
+        yield f"Status monitor error: {str(e)}"
+
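The `update_status` / `status_monitor` pair is a single-producer, single-consumer bridge between synchronous agent code and the async UI. A minimal sketch of the same pattern in isolation, using only the standard library (`report` and `watch` are illustrative names, not part of app.py):

```python
import asyncio

status_queue: "asyncio.Queue[str]" = asyncio.Queue()

def report(message: str) -> None:
    # Synchronous producer; put_nowait never blocks (it can only raise
    # QueueFull on a bounded queue, and this one is unbounded).
    status_queue.put_nowait(message)

async def watch() -> None:
    # Async consumer: wait up to 0.5 s for an update, then loop again so
    # the task stays cancellable.
    while True:
        try:
            print("status:", await asyncio.wait_for(status_queue.get(), timeout=0.5))
        except asyncio.TimeoutError:
            continue

async def main() -> None:
    watcher = asyncio.create_task(watch())
    for step in ("analyzing", "running task", "done"):
        report(step)
        await asyncio.sleep(0.2)
    watcher.cancel()

asyncio.run(main())
```

Note that `put_nowait` is only safe from the event loop's own thread; a producer running on another thread (for example, an agent inside a ThreadPoolExecutor) would need `loop.call_soon_threadsafe(status_queue.put_nowait, message)` instead.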
+# --- Gradio Chat Function ---
+async def respond(message, history):
+    """Response function for the Gradio ChatInterface, using the ManagerAgent"""
+
+    # Check whether the ManagerAgent is initialized
+    if manager_agent is None or Settings.llm is None:
+        yield "❌ ManagerAgent not initialized. Please check your ANTHROPIC_API_KEY environment variable and ensure all components are properly loaded."
+        return
+
+    try:
+        print(f"\n🤖 ManagerAgent: Processing user message: '{message[:100]}{'...' if len(message) > 100 else ''}'")
+
+        # Update status
+        update_status(f"Processing your request: '{message[:50]}{'...' if len(message) > 50 else ''}'")
+
+        # Create a TaskPrompt from the user message
+        # The ManagerAgent will analyze the prompt and decide on the appropriate workflow
+        task_prompt = TaskPrompt(text=message)
+
+        print("🔄 ManagerAgent: Starting task execution workflow...")
+
+        # Get complete response
+        response = manager_agent.run_task(task_prompt)
+
+        # Simulate progressive streaming of the response for a better UX
+        words = response.split()
+        partial_response = ""
+
+        for i, word in enumerate(words):
+            partial_response += word + " "
+
+            # Yield the partial response every few words for a streaming effect
+            if i % 3 == 0 or i == len(words) - 1: # Every 3 words, or on the last word
+                yield partial_response.strip()
+                # Add a small delay for realistic streaming
+                await asyncio.sleep(0.01)
+
+        print("✅ ManagerAgent: Task completed successfully")
+        update_status("Ready for your next request")
+
+    except Exception as e:
+        error_message = f"❌ ManagerAgent Error: {str(e)}"
+        print(f"\n🚨 ManagerAgent Error: {e}")
+        traceback.print_exc()
+        update_status(f"Error: {str(e)}")
+        yield error_message
 
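`respond` relies on `gr.ChatInterface`'s generator contract: when `fn` is an (async) generator, each yielded string replaces the bot's message in place, which is what produces the word-by-word effect. A minimal sketch of just that contract (the echo behavior is illustrative only):

```python
import asyncio
import gradio as gr

async def stream_echo(message, history):
    # Each yield re-renders the bot turn with the accumulated prefix.
    partial = ""
    for word in message.split():
        partial += word + " "
        yield partial.strip()
        await asyncio.sleep(0.05)  # small pause so the streaming is visible

demo = gr.ChatInterface(fn=stream_echo)
demo.launch()
```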
 # --- Gradio Interface Setup ---
+def create_interface():
+    """Create the Gradio interface"""
+
+    # Check the API key
     if "ANTHROPIC_API_KEY" not in os.environ:
+        print("\n" + "="*60)
+        print("⚠️ WARNING: ANTHROPIC_API_KEY not found in environment variables!")
+        print("Please set your API key:")
+        print("export ANTHROPIC_API_KEY='your-api-key-here'")
+        print("="*60 + "\n")
+
+    # Create the chat interface
+    interface = gr.ChatInterface(
+        fn=respond,
+        chatbot=gr.Chatbot(
             height=500,
             show_label=False,
+            container=True,
+            type="messages"
+        ),
+        textbox=gr.Textbox(
+            placeholder="Tapez votre message ici...",
+            container=False,
+            scale=7
+        ),
+        title="ALITA",
+        description=f"ALITA is a self-learning AI agent that can search for information, analyze data, create tools, and orchestrate complex tasks.",
+        examples=[
+            "🔍 Recherche des informations sur l'intelligence artificielle",
+            "📊 Analyse les tendances du marché technologique",
+            "⚡ Crée un script pour automatiser une tâche répétitive",
+            "🌐 Trouve des ressources open source pour machine learning",
+        ],
+        theme="soft"
+    )
+
+    # Add the status box
+    with interface:
         with gr.Row():
+            with gr.Column():
+                status_box = gr.Textbox(
+                    label="Agent Status",
+                    value="Ready",
+                    interactive=False
                 )
 
+        # Use the Interface for status monitoring
+        gr.Interface(
+            fn=status_monitor,
+            inputs=None,
+            outputs=status_box,
+            live=True,
+            show_progress=False
+        )
+
+    return interface
 
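Wiring `status_monitor` through a nested `gr.Interface` with `live=True` is one way to poll; the `demo.load(continuous_status_updater, ...)` call in the earlier version above shows the other common option, attaching an async generator to the load event so the status box streams from page load onward. A minimal sketch of that approach (the 0.5 s interval is arbitrary):

```python
import asyncio
import gradio as gr

current_status = "Ready"

async def status_stream():
    # Generator bound to the load event: every yield updates the Textbox.
    while True:
        yield current_status
        await asyncio.sleep(0.5)

with gr.Blocks() as demo:
    status_box = gr.Textbox(label="Agent Status", interactive=False)
    demo.load(status_stream, inputs=None, outputs=status_box)

demo.launch()
```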
+# Initialize the components
+llm, manager_agent = initialize_llm_and_agent()
 
 # --- Launch the Application ---
 if __name__ == "__main__":
     print("🚀 Starting Gradio ALITA Chat Application...")
+
+    # Create and launch the interface
+    app = create_interface()
+
     try:
+        app.launch(
+            share=False,
+            server_name="127.0.0.1",
+            server_port=7825,
+            show_error=True
         )
     except KeyboardInterrupt:
         print("\n👋 Application stopped by user")
     except Exception as e:
         print(f"\n❌ Error launching application: {e}")
         traceback.print_exc()
+
     print("✅ Gradio application stopped.")
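The launch parameters can also be environment-driven rather than hard-coded: as the comments in the earlier version note, Hugging Face Spaces expects the server bound to 0.0.0.0 on the port given by the PORT variable, while 127.0.0.1:7825 is only reachable locally. A hedged sketch (SPACE_ID is an environment variable that Spaces sets automatically):

```python
import os

# Bind to all interfaces on Spaces (SPACE_ID is present there); stay on
# localhost for local development, matching the values used above.
on_spaces = "SPACE_ID" in os.environ
app.launch(
    server_name="0.0.0.0" if on_spaces else "127.0.0.1",
    server_port=int(os.environ.get("PORT", 7825)),
    show_error=True,
)
```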