kshitijthakkar committed
Commit 17d4110 · 1 Parent(s): cbdcdb1

Dockerfile fix

Files changed (2):
  1. README.md +1 -1
  2. main.py +161 -0
README.md CHANGED
@@ -160,7 +160,7 @@ This project was specifically designed for Gradio Agents & MCP Hackathon featur
 - **Hyperbolic** - High-performance AI inference platform
 
 
-### 🎯 Hackathon Categories
+### 🎯 **System Features**
 
 - **🤖 AI/ML Innovation** - Novel use of agents for IT education
 - **🎨 Creative Tech** - Unique combination of technical analysis and storytelling
main.py ADDED
@@ -0,0 +1,161 @@
# main.py
import asyncio
import base64
import logging
import os
import time
from io import BytesIO
from threading import Thread

import requests
import uvicorn
import yaml
from dotenv import load_dotenv
from PIL import Image
from smolagents import CodeAgent, InferenceClientModel, TransformersModel, tool
from smolagents.mcp_client import MCPClient

from mcp_server import app as mcp_server
from outage_odyssey_ui import GradioUI

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Run the FastAPI app (the MCP server) with uvicorn
async def run_mcp_server():
    config = uvicorn.Config(app=mcp_server, host="0.0.0.0", port=8000)
    server = uvicorn.Server(config)
    logger.info("Starting mcp_server...")
    await server.serve()

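# mcp_server is expected to expose an SSE endpoint at /sse, which the
# MCPClient created in run_outage_odyssey_app connects to once the server is up.
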
def run_outage_odyssey_app():
    """Run the Outage Odyssey Gradio app."""
    # Load environment variables
    load_dotenv()
    MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
    ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
    CODEASTREAL_API_KEY = os.getenv("CODEASTREAL_API_KEY")
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
    HF_TOKEN = os.getenv("HF_TOKEN")
    USE_CLOUD_MODEL = os.getenv("USE_CLOUD_MODEL", "true").lower()
    GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
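    # Only HF_TOKEN and USE_CLOUD_MODEL are used in this file; the other keys
    # are loaded for providers presumably wired up elsewhere (e.g. in the MCP server).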

    # Select model: cloud-hosted inference by default, local weights otherwise
    if USE_CLOUD_MODEL == 'true':
        model = InferenceClientModel(
            model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            provider="hf-inference",
            api_key=HF_TOKEN,
        )
        model_description = "This agent uses MCP tools and a cloud-hosted model via InferenceClientModel."
    else:
        print("Loading local Qwen model...")
        model = TransformersModel(
            model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            device_map='auto',
            max_new_tokens=8192,
            trust_remote_code=True
        )
        model_description = "This agent uses MCP tools and a locally run Qwen2.5-Coder-32B-Instruct model."

    print(model_description)
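    # Set USE_CLOUD_MODEL=false in .env to load the model weights locally instead.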

    # Tool: convert a PIL image to a base64-encoded PNG data URI
    @tool
    def pil_to_base64(pil_image: Image.Image) -> str:
        """Convert a PIL image to a base64-encoded PNG data URI.

        Args:
            pil_image: The PIL image to encode.
        """
        buffer = BytesIO()
        pil_image.save(buffer, format="PNG")
        img_str = base64.b64encode(buffer.getvalue()).decode()
        return f"data:image/png;base64,{img_str}"

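    # Example result: pil_to_base64(Image.new("RGB", (1, 1))) -> "data:image/png;base64,iVBOR..."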
    mcp_client = None
    try:
        # Connect to the MCP server over SSE
        mcp_client = MCPClient({"url": "http://localhost:8000/sse"})
        tools = mcp_client.get_tools()

        tools_array = [{
            "name": tool.name,
            "description": tool.description,
            "inputs": tool.inputs,
            "output_type": tool.output_type,
            "is_initialized": tool.is_initialized
        } for tool in tools]

        tool_names = [tool["name"] for tool in tools_array]
        print(f"Connected to MCP server. Available tools: {', '.join(tool_names)}")

        # Load prompt templates
        with open("prompts.yml", 'r', encoding='utf-8') as stream:
            prompt_templates = yaml.safe_load(stream)

        # Create Agent
        agent = CodeAgent(
            tools=[pil_to_base64, *tools],
            model=model,
            prompt_templates=prompt_templates,
            max_steps=10,
            planning_interval=5,
            additional_authorized_imports=[
                'time', 'math', 'queue', 're', 'stat', 'collections', 'datetime', 'statistics',
                'itertools', 'unicodedata', 'random', 'matplotlib.pyplot', 'open', 'pandas', 'numpy',
                'json', 'yaml', 'plotly', 'pillow', 'PIL', 'base64', 'io'
            ]
        )

        agent.name = "Outage Odyssey Agent"
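        # max_steps caps the agent loop at 10 tool-calling steps, and
        # planning_interval=5 triggers a fresh planning step every 5 steps.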

        # Launch Gradio UI
        GradioUI(agent=agent).launch(share=True, mcp_server=True)

    except Exception as e:
        print(f"Error starting Gradio: {str(e)}")

    finally:
        if mcp_client is not None:
            mcp_client.disconnect()
            print("MCP client disconnected")

# Example usage:
# run_outage_odyssey_app()

# Function to run Gradio after FastAPI is up
def run_gradio_with_check():
    max_attempts = 10
    for attempt in range(max_attempts):
        try:
            response = requests.get("http://localhost:8000/", timeout=2)
            if response.status_code == 200:
                logger.info("FastAPI is up, starting Gradio...")
                run_outage_odyssey_app()
                return
        except requests.RequestException:
            logger.info(f"Waiting for FastAPI... Attempt {attempt + 1}/{max_attempts}")
        time.sleep(1)
    logger.error("Failed to start Gradio: FastAPI not ready")
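# The health check assumes the MCP server answers HTTP 200 on "/"; adjust the
# probe path if the root route is not defined.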

# Main function to control startup
async def main():
    # Start FastAPI
    fastapi_task = asyncio.create_task(run_mcp_server())

    # Start Gradio in a separate thread after FastAPI is confirmed running
    gradio_thread = Thread(target=run_gradio_with_check)
    gradio_thread.start()

    # Wait for FastAPI task to complete
    await fastapi_task


if __name__ == "__main__":
    asyncio.run(main())
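
With this entrypoint, python main.py starts the MCP server on port 8000 and then launches the Gradio UI once the server responds, which is presumably what the fixed Dockerfile invokes.

For reference, a minimal sketch of the inverse of pil_to_base64 (hypothetical, not part of this commit), decoding the returned data URI back into a PIL image:

import base64
from io import BytesIO
from PIL import Image

def base64_to_pil(data_uri: str) -> Image.Image:
    # Split off the "data:image/png;base64," header, decode the payload, reopen as an image
    _, b64_payload = data_uri.split(",", 1)
    return Image.open(BytesIO(base64.b64decode(b64_payload)))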