suryanshp1 committed · Commit a173b58 · 1 Parent(s): 58459b2

feat: langfuse
Browse files
- .env.example +13 -1
- LANGFUSE_SETUP.md +145 -0
- docker-compose.app-only.yml +29 -0
- docker-compose.yml +47 -0
- requirements.txt +4 -1
- setup_langfuse.py +91 -0
- src/langgraphagenticai/__pycache__/main.cpython-312.pyc +0 -0
- src/langgraphagenticai/graph/__pycache__/graph_builder.cpython-312.pyc +0 -0
- src/langgraphagenticai/graph/graph_builder.py +36 -1
- src/langgraphagenticai/llms/__pycache__/groqllm.cpython-312.pyc +0 -0
- src/langgraphagenticai/llms/groqllm.py +19 -2
- src/langgraphagenticai/main.py +14 -1
- src/langgraphagenticai/mcp/__init__.py +1 -0
- src/langgraphagenticai/mcp/mcp_client.py +120 -0
- src/langgraphagenticai/mcp/mcp_config.py +85 -0
- src/langgraphagenticai/monitoring/__init__.py +1 -0
- src/langgraphagenticai/monitoring/__pycache__/__init__.cpython-312.pyc +0 -0
- src/langgraphagenticai/monitoring/__pycache__/dashboard.cpython-312.pyc +0 -0
- src/langgraphagenticai/monitoring/__pycache__/langfuse_integration.cpython-312.pyc +0 -0
- src/langgraphagenticai/monitoring/dashboard.py +96 -0
- src/langgraphagenticai/monitoring/langfuse_integration.py +146 -0
- src/langgraphagenticai/nodes/__pycache__/mcp_chatbot_node.cpython-312.pyc +0 -0
- src/langgraphagenticai/nodes/mcp_chatbot_node.py +28 -0
- src/langgraphagenticai/tools/__pycache__/mcp_tools.cpython-312.pyc +0 -0
- src/langgraphagenticai/tools/mcp_tools.py +141 -0
- src/langgraphagenticai/ui/streamlitui/__pycache__/display_result.cpython-312.pyc +0 -0
- src/langgraphagenticai/ui/streamlitui/__pycache__/loadui.cpython-312.pyc +0 -0
- src/langgraphagenticai/ui/streamlitui/display_result.py +38 -4
- src/langgraphagenticai/ui/streamlitui/loadui.py +55 -0
- src/langgraphagenticai/ui/uiconfigfile.ini +1 -1
.env.example
CHANGED
@@ -1,3 +1,15 @@
 # Copy this file to .env and fill in your API keys
 GROQ_API_KEY=your_groq_api_key_here
-TAVILY_API_KEY=your_tavily_api_key_here
+TAVILY_API_KEY=your_tavily_api_key_here
+
+# Langfuse Configuration
+LANGFUSE_SECRET_KEY=sk-lf-...
+LANGFUSE_PUBLIC_KEY=pk-lf-...
+LANGFUSE_HOST=http://localhost:3000
+
+# Optional Langfuse Settings
+TELEMETRY_ENABLED=true
+LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES=false
+
+# Environment Mode (development/production)
+STREAMLIT_ENV=development
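These variables are read at runtime via `os.getenv` (see `langfuse_integration.py` further down). A minimal sketch of loading them locally, assuming the `python-dotenv` package is available (an assumption; it is not pinned in `requirements.txt`):

```python
# Sketch: load .env values the same way the app reads them (os.getenv).
# Assumes python-dotenv is installed; it is not listed in requirements.txt.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory

secret_key = os.getenv("LANGFUSE_SECRET_KEY")  # sk-lf-...
public_key = os.getenv("LANGFUSE_PUBLIC_KEY")  # pk-lf-...
host = os.getenv("LANGFUSE_HOST", "http://localhost:3000")

# Monitoring is silently disabled when the keys are missing,
# mirroring LangfuseManager._initialize_langfuse below.
if not secret_key or not public_key:
    print("Langfuse keys not set; monitoring will be disabled.")
```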
LANGFUSE_SETUP.md
ADDED
@@ -0,0 +1,145 @@
+# Langfuse Monitoring Setup
+
+This guide helps you set up Langfuse monitoring for your LangGraph application to track LLM responses, costs, and performance.
+
+## Quick Start
+
+### 1. Start Langfuse Services
+
+```bash
+# Start Langfuse database and server
+docker-compose up -d langfuse-db langfuse-server
+
+# Or use the setup script
+python setup_langfuse.py
+```
+
+### 2. Configure Langfuse
+
+1. Open http://localhost:3000 in your browser
+2. Create an account or sign in
+3. Create a new project (or use the default one)
+4. Go to **Settings** → **API Keys**
+5. Copy the **Secret Key** and **Public Key**
+
+### 3. Update Environment Variables
+
+Add to your `.env` file:
+
+```env
+LANGFUSE_SECRET_KEY=sk-lf-your-secret-key-here
+LANGFUSE_PUBLIC_KEY=pk-lf-your-public-key-here
+LANGFUSE_HOST=http://localhost:3000
+```
+
+### 4. Start the Application
+
+```bash
+# Start all services including your app
+docker-compose up --build
+
+# Or start just your app if Langfuse is already running
+docker-compose up langgraph-agenticai
+```
+
+## What Gets Monitored
+
+- **LLM Requests & Responses**: All interactions with Groq models
+- **Token Usage**: Input and output tokens for cost calculation
+- **Response Times**: Latency metrics for performance monitoring
+- **User Sessions**: Track user interactions across conversations
+- **Tool Usage**: Monitor MCP and other tool executions
+- **Error Tracking**: Capture and analyze failures
+
+## Features
+
+### In-App Monitoring
+- ✅ Real-time monitoring status in sidebar
+- ✅ Direct link to Langfuse dashboard
+- ✅ Session tracking across conversations
+- ✅ Automatic cost tracking
+
+### Langfuse Dashboard
+- **Traces**: Detailed view of each conversation
+- **Costs**: Token usage and cost breakdown
+- **Performance**: Response time analytics
+- **Search**: Find specific interactions
+- **Analytics**: Usage patterns and trends
+
+## Configuration Options
+
+### Environment Variables
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `LANGFUSE_SECRET_KEY` | Your Langfuse secret key | Required |
+| `LANGFUSE_PUBLIC_KEY` | Your Langfuse public key | Required |
+| `LANGFUSE_HOST` | Langfuse server URL | `http://localhost:3000` |
+| `TELEMETRY_ENABLED` | Enable Langfuse telemetry | `true` |
+
+### Docker Compose Services
+
+- **langfuse-db**: PostgreSQL database for Langfuse
+- **langfuse-server**: Langfuse web application
+- **langgraph-agenticai**: Your main application
+
+## Troubleshooting
+
+### Langfuse Not Starting
+
+```bash
+# Check service logs
+docker-compose logs langfuse-server
+docker-compose logs langfuse-db
+
+# Restart services
+docker-compose restart langfuse-server langfuse-db
+```
+
+### Connection Issues
+
+1. Verify environment variables are set correctly
+2. Check that Langfuse is accessible at http://localhost:3000
+3. Ensure API keys are valid and have proper permissions
+
+### Monitoring Not Working
+
+1. Check the sidebar for monitoring status
+2. Verify Langfuse credentials in `.env` file
+3. Look for error messages in the Streamlit app
+
+## Advanced Usage
+
+### Custom Traces
+
+```python
+from src.langgraphagenticai.monitoring.langfuse_integration import langfuse_manager
+
+# Create custom trace
+trace = langfuse_manager.create_trace(
+    name="custom_operation",
+    user_id="user123",
+    session_id="session456"
+)
+```
+
+### Cost Analysis
+
+Access detailed cost breakdowns in the Langfuse dashboard:
+1. Go to **Analytics** → **Usage**
+2. Filter by model, user, or time period
+3. Export data for further analysis
+
+## Useful Links
+
+- [Langfuse Documentation](https://langfuse.com/docs)
+- [Langfuse GitHub](https://github.com/langfuse/langfuse)
+- [LangChain Integration](https://langfuse.com/docs/integrations/langchain)
+
+## Support
+
+If you encounter issues:
+1. Check the troubleshooting section above
+2. Review Docker Compose logs
+3. Consult Langfuse documentation
+4. Open an issue in the project repository
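The "Connection Issues" steps above can be checked by hand against the same endpoint the setup script polls (`/api/public/health`); a minimal sketch:

```python
# Sketch: probe the Langfuse health endpoint used by setup_langfuse.py.
import requests

host = "http://localhost:3000"  # match your LANGFUSE_HOST
try:
    resp = requests.get(f"{host}/api/public/health", timeout=5)
    print("Langfuse healthy" if resp.status_code == 200 else f"HTTP {resp.status_code}")
except requests.exceptions.RequestException as exc:
    print(f"Langfuse unreachable: {exc}")
```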
docker-compose.app-only.yml
ADDED
@@ -0,0 +1,29 @@
+version: '3.8'
+
+services:
+  # Main Application Only (without Langfuse)
+  langgraph-agenticai:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    container_name: langgraph-agenticai-app
+    ports:
+      - "8501:8501"
+    environment:
+      - GROQ_API_KEY=${GROQ_API_KEY}
+      - TAVILY_API_KEY=${TAVILY_API_KEY}
+      - STREAMLIT_ENV=production
+    volumes:
+      - ./src:/app/src
+      - ./app.py:/app/app.py
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "curl", "--fail", "http://localhost:8501/_stcore/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 40s
+
+networks:
+  default:
+    driver: bridge
docker-compose.yml
CHANGED
@@ -1,6 +1,46 @@
 version: '3.8'
 
 services:
+  # Langfuse Database
+  langfuse-db:
+    image: postgres:15
+    container_name: langfuse-postgres
+    environment:
+      POSTGRES_DB: langfuse
+      POSTGRES_USER: langfuse
+      POSTGRES_PASSWORD: langfuse_password
+    volumes:
+      - langfuse_db_data:/var/lib/postgresql/data
+    networks:
+      - langgraph-network
+    restart: unless-stopped
+
+  # Langfuse Server
+  langfuse-server:
+    image: langfuse/langfuse:2
+    container_name: langfuse-server
+    depends_on:
+      - langfuse-db
+    ports:
+      - "3000:3000"
+    environment:
+      DATABASE_URL: postgresql://langfuse:langfuse_password@langfuse-db:5432/langfuse
+      NEXTAUTH_SECRET: mysecret
+      SALT: mysalt
+      NEXTAUTH_URL: http://localhost:3000
+      TELEMETRY_ENABLED: ${TELEMETRY_ENABLED:-true}
+      LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: ${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-false}
+    networks:
+      - langgraph-network
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "curl", "--fail", "http://localhost:3000/api/public/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 60s
+
+  # Main Application
   langgraph-agenticai:
     build:
       context: .
@@ -11,6 +51,10 @@ services:
     environment:
       - GROQ_API_KEY=${GROQ_API_KEY}
       - TAVILY_API_KEY=${TAVILY_API_KEY}
+      - LANGFUSE_SECRET_KEY=${LANGFUSE_SECRET_KEY:-}
+      - LANGFUSE_PUBLIC_KEY=${LANGFUSE_PUBLIC_KEY:-}
+      - LANGFUSE_HOST=${LANGFUSE_HOST:-http://langfuse-server:3000}
+      - STREAMLIT_ENV=${STREAMLIT_ENV:-development}
     volumes:
       - ./src:/app/src
       - ./app.py:/app/app.py
@@ -24,6 +68,9 @@ services:
     networks:
       - langgraph-network
 
+volumes:
+  langfuse_db_data:
+
 networks:
   langgraph-network:
     driver: bridge
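Note the two defaults for `LANGFUSE_HOST`: inside the compose network the app reaches the server at `http://langfuse-server:3000` (the `${VAR:-default}` interpolation above), while `.env.example` uses `http://localhost:3000` for host-side access. A sketch of the fallback as the app sees it:

```python
# Sketch: the app-side default vs. the compose-injected default.
# Inside the compose network the service name resolves; from the host it does not.
import os

host = os.getenv("LANGFUSE_HOST", "http://localhost:3000")
# Under docker-compose the container receives http://langfuse-server:3000
# unless LANGFUSE_HOST is set in the shell that runs `docker-compose up`.
print(f"Langfuse endpoint in use: {host}")
```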
requirements.txt
CHANGED
@@ -5,4 +5,7 @@ langchain_core==0.3.76
 langchain_groq==0.3.8
 langchain_openai==0.3.33
 faiss-cpu==1.12.0
-streamlit==1.49.1
+streamlit==1.49.1
+langchain-mcp-adapters
+mcp
+langfuse
setup_langfuse.py
ADDED
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+"""
+Langfuse Setup Script
+Helps users set up Langfuse monitoring for the LangGraph application
+"""
+import os
+import json
+import requests
+import time
+import subprocess
+
+
+def wait_for_langfuse(host="http://localhost:3000", timeout=120):
+    """Wait for Langfuse to be ready"""
+    print(f"Waiting for Langfuse at {host}...")
+
+    start_time = time.time()
+    while time.time() - start_time < timeout:
+        try:
+            response = requests.get(f"{host}/api/public/health", timeout=5)
+            if response.status_code == 200:
+                print("✅ Langfuse is ready!")
+                return True
+        except requests.exceptions.RequestException:
+            pass
+
+        print("⏳ Waiting for Langfuse to start...")
+        time.sleep(5)
+
+    print("❌ Timeout waiting for Langfuse")
+    return False
+
+
+def create_langfuse_project():
+    """Instructions for creating a Langfuse project"""
+    print("\nSetting up Langfuse monitoring...")
+    print("\nFollow these steps:")
+    print("1. Open http://localhost:3000 in your browser")
+    print("2. Create an account or sign in")
+    print("3. Create a new project")
+    print("4. Go to Settings > API Keys")
+    print("5. Copy the Secret Key and Public Key")
+    print("6. Add them to your .env file:")
+    print("   LANGFUSE_SECRET_KEY=sk-lf-...")
+    print("   LANGFUSE_PUBLIC_KEY=pk-lf-...")
+    print("   LANGFUSE_HOST=http://localhost:3000")
+
+
+def main():
+    print("Langfuse Setup for LangGraph Application")
+    print("=" * 50)
+
+    # Check if Docker Compose is available
+    try:
+        subprocess.run(["docker-compose", "--version"],
+                       capture_output=True, check=True)
+        print("✅ Docker Compose is available")
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        print("❌ Docker Compose not found. Please install Docker Compose first.")
+        return
+
+    # Start Langfuse services
+    print("\nStarting Langfuse services...")
+    try:
+        subprocess.run([
+            "docker-compose", "up", "-d",
+            "langfuse-db", "langfuse-server"
+        ], check=True)
+        print("✅ Langfuse services started")
+    except subprocess.CalledProcessError as e:
+        print(f"❌ Failed to start services: {e}")
+        return
+
+    # Wait for Langfuse to be ready
+    if wait_for_langfuse():
+        create_langfuse_project()
+
+        print("\nNext steps:")
+        print("1. Complete the Langfuse setup in your browser")
+        print("2. Update your .env file with the API keys")
+        print("3. Restart your application: docker-compose up --build")
+        print("4. Your LLM calls will now be monitored in Langfuse!")
+
+        print(f"\nLangfuse Dashboard: http://localhost:3000")
+    else:
+        print("❌ Failed to start Langfuse. Check Docker logs:")
+        print("docker-compose logs langfuse-server")
+
+
+if __name__ == "__main__":
+    main()
src/langgraphagenticai/__pycache__/main.cpython-312.pyc
CHANGED
Binary files a/src/langgraphagenticai/__pycache__/main.cpython-312.pyc and b/src/langgraphagenticai/__pycache__/main.cpython-312.pyc differ

src/langgraphagenticai/graph/__pycache__/graph_builder.cpython-312.pyc
CHANGED
Binary files a/src/langgraphagenticai/graph/__pycache__/graph_builder.cpython-312.pyc and b/src/langgraphagenticai/graph/__pycache__/graph_builder.cpython-312.pyc differ
src/langgraphagenticai/graph/graph_builder.py
CHANGED
@@ -4,7 +4,9 @@ from langchain_core.prompts import ChatPromptTemplate
 from src.langgraphagenticai.state.state import State
 from src.langgraphagenticai.nodes.basic_chatbot_node import BasicChatbotNode
 from src.langgraphagenticai.nodes.chatbot_with_tool_node import ChatbotWithToolNode
+from src.langgraphagenticai.nodes.mcp_chatbot_node import MCPChatbotNode
 from src.langgraphagenticai.tools.search_tool import get_tools, create_tool_node
+from src.langgraphagenticai.tools.mcp_tools import create_mcp_tools_from_config
 import datetime
 
 
@@ -45,7 +47,35 @@ class GraphBuilder:
 
         return graph_builder
 
-    def setup_graph(self, usecase: str):
+    def mcp_chatbot_build_graph(self, mcp_config: dict):
+        """Build graph for MCP chatbot with MCP tools"""
+        graph_builder = StateGraph(State)
+
+        # Create MCP tools from configuration
+        mcp_tools = create_mcp_tools_from_config(mcp_config)
+
+        if not mcp_tools:
+            raise ValueError("No MCP tools could be created from the configuration")
+
+        # Create tool node
+        tool_node = ToolNode(tools=mcp_tools)
+
+        # Create MCP chatbot node
+        mcp_chatbot_node_obj = MCPChatbotNode(self.llm)
+        chatbot_node = mcp_chatbot_node_obj.create_chatbot(tools=mcp_tools)
+
+        # Add nodes
+        graph_builder.add_node("chatbot", chatbot_node)
+        graph_builder.add_node("tools", tool_node)
+
+        # Define edges
+        graph_builder.add_edge(START, "chatbot")
+        graph_builder.add_conditional_edges("chatbot", tools_condition)
+        graph_builder.add_edge("tools", "chatbot")
+
+        return graph_builder
+
+    def setup_graph(self, usecase: str, **kwargs):
         if usecase == "Basic Chatbot":
             graph_builder = self.basic_chatbot_build_graph()
         elif usecase == "Chatbot with Tool":
@@ -54,6 +84,11 @@ class GraphBuilder:
             graph_builder = (
                 self.chatbot_with_tools_build_graph()
             )  # AI News also uses tools
+        elif usecase == "MCP Chatbot":
+            mcp_config = kwargs.get("mcp_config")
+            if not mcp_config:
+                raise ValueError("MCP configuration is required for MCP Chatbot")
+            graph_builder = self.mcp_chatbot_build_graph(mcp_config)
         else:
             raise ValueError(f"Unknown usecase: {usecase}")
 
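A sketch of driving the new branch directly, using the sample-configuration helper this commit adds in `mcp_tools.py`; the model name is one of those listed in `uiconfigfile.ini`, and whether `setup_graph` returns a compiled graph or a builder is not shown in this diff (it follows the existing use cases):

```python
# Sketch: build the MCP Chatbot graph the same way main.py does.
import json
import os
from langchain_groq import ChatGroq
from src.langgraphagenticai.graph.graph_builder import GraphBuilder
from src.langgraphagenticai.tools.mcp_tools import get_sample_mcp_config

llm = ChatGroq(api_key=os.getenv("GROQ_API_KEY"), model="llama-3.3-70b-versatile")
mcp_config = json.loads(get_sample_mcp_config())

# Raises ValueError if mcp_config is missing or yields no tools.
graph = GraphBuilder(llm).setup_graph(usecase="MCP Chatbot", mcp_config=mcp_config)
```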
src/langgraphagenticai/llms/__pycache__/groqllm.cpython-312.pyc
CHANGED
Binary files a/src/langgraphagenticai/llms/__pycache__/groqllm.cpython-312.pyc and b/src/langgraphagenticai/llms/__pycache__/groqllm.cpython-312.pyc differ
src/langgraphagenticai/llms/groqllm.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 import streamlit as st
 from langchain_groq import ChatGroq
+from src.langgraphagenticai.monitoring.langfuse_integration import create_monitored_llm, get_langfuse_callbacks
 
 
 class GroqLLM:
@@ -16,8 +17,24 @@ class GroqLLM:
                 st.error("Please enter the GROQ API key and select a model")
                 return None
 
-            llm = ChatGroq(api_key=groq_api_key, model=selected_groq_model)
-            return llm
+            # Create base LLM first (without monitoring)
+            llm = ChatGroq(
+                api_key=groq_api_key,
+                model=selected_groq_model
+            )
+
+            # Try to add monitoring, but don't fail if it doesn't work
+            try:
+                callbacks = get_langfuse_callbacks()
+                if callbacks:
+                    llm.callbacks = callbacks
+
+                # Wrap with Langfuse monitoring
+                monitored_llm = create_monitored_llm(llm)
+                return monitored_llm
+            except Exception:
+                # If monitoring fails, return the base LLM
+                return llm
         except Exception as e:
             st.error(f"Error initializing ChatGroq: {str(e)}")
             print(str(e))
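The same fallback pattern can be used standalone; a sketch, assuming the monitoring module imports cleanly:

```python
# Sketch: attach Langfuse callbacks to a chat model, with graceful fallback.
import os
from langchain_groq import ChatGroq
from src.langgraphagenticai.monitoring.langfuse_integration import (
    create_monitored_llm,
    get_langfuse_callbacks,
)

llm = ChatGroq(api_key=os.getenv("GROQ_API_KEY"), model="openai/gpt-oss-20b")

callbacks = get_langfuse_callbacks()  # [] when monitoring is disabled
if callbacks:
    llm.callbacks = callbacks

llm = create_monitored_llm(llm)  # returns the original llm when Langfuse is off
```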
src/langgraphagenticai/main.py
CHANGED
@@ -5,6 +5,11 @@ from src.langgraphagenticai.ui.streamlitui.loadui import LoadStreamlitUI
 from src.langgraphagenticai.llms.groqllm import GroqLLM
 from src.langgraphagenticai.graph.graph_builder import GraphBuilder
 from src.langgraphagenticai.ui.streamlitui.display_result import DisplayResultStremlit
+# Import monitoring - but don't fail if it's not available
+try:
+    from src.langgraphagenticai.monitoring.langfuse_integration import langfuse_manager
+except ImportError:
+    langfuse_manager = None
 
 
 # Main function START
@@ -53,7 +58,15 @@ def load_langgraph_agenticai_app():
         graph_builder = GraphBuilder(model)
 
         try:
-            graph = graph_builder.setup_graph(usecase=usecase)
+            # Pass MCP config if it's an MCP usecase
+            if usecase == "MCP Chatbot":
+                mcp_config = user_input.get("mcp_config")
+                if not mcp_config:
+                    st.error("MCP configuration is required for MCP Chatbot")
+                    return
+                graph = graph_builder.setup_graph(usecase=usecase, mcp_config=mcp_config)
+            else:
+                graph = graph_builder.setup_graph(usecase=usecase)
             DisplayResultStremlit(
                 usecase, graph, user_message
             ).display_result_on_ui()
src/langgraphagenticai/mcp/__init__.py
ADDED
@@ -0,0 +1 @@
+# MCP Integration Module
src/langgraphagenticai/mcp/mcp_client.py
ADDED
@@ -0,0 +1,120 @@
+"""
+MCP Client Manager
+Handles connections to MCP servers and tool management
+"""
+import asyncio
+import subprocess
+from typing import Dict, List, Any, Optional
+import streamlit as st
+from langchain_mcp_adapters import MCPToolkit
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+
+from .mcp_config import MCPConfig
+
+
+class MCPClientManager:
+    def __init__(self):
+        self.config = MCPConfig()
+        self.active_sessions: Dict[str, ClientSession] = {}
+        self.toolkits: Dict[str, MCPToolkit] = {}
+
+    async def connect_to_server(self, server_name: str, server_config: Dict[str, Any]) -> Optional[MCPToolkit]:
+        """Connect to an MCP server and return its toolkit"""
+        try:
+            # Create server parameters
+            server_params = StdioServerParameters(
+                command=server_config["command"],
+                args=server_config.get("args", []),
+                env=server_config.get("env", {})
+            )
+
+            # Create stdio client
+            stdio_transport = await stdio_client(server_params)
+
+            # Create session
+            session = ClientSession(stdio_transport[0], stdio_transport[1])
+
+            # Initialize session
+            await session.initialize()
+
+            # Store session
+            self.active_sessions[server_name] = session
+
+            # Create toolkit
+            toolkit = MCPToolkit(session=session)
+            self.toolkits[server_name] = toolkit
+
+            st.success(f"✅ Connected to MCP server: {server_name}")
+            return toolkit
+
+        except Exception as e:
+            st.error(f"❌ Failed to connect to MCP server {server_name}: {e}")
+            return None
+
+    async def connect_all_servers(self) -> Dict[str, MCPToolkit]:
+        """Connect to all enabled MCP servers"""
+        enabled_servers = self.config.get_enabled_servers()
+        connected_toolkits = {}
+
+        for server_name, server_config in enabled_servers.items():
+            toolkit = await self.connect_to_server(server_name, server_config)
+            if toolkit:
+                connected_toolkits[server_name] = toolkit
+
+        return connected_toolkits
+
+    def get_all_tools(self) -> List[Any]:
+        """Get all tools from all connected MCP servers"""
+        all_tools = []
+
+        for server_name, toolkit in self.toolkits.items():
+            try:
+                server_config = self.config.get_server_config(server_name)
+                disabled_tools = server_config.get("disabledTools", [])
+
+                # Get tools from toolkit
+                tools = toolkit.get_tools()
+
+                # Filter out disabled tools
+                enabled_tools = [
+                    tool for tool in tools
+                    if tool.name not in disabled_tools
+                ]
+
+                all_tools.extend(enabled_tools)
+                st.info(f"Loaded {len(enabled_tools)} tools from {server_name}")
+
+            except Exception as e:
+                st.warning(f"⚠️ Error getting tools from {server_name}: {e}")
+
+        return all_tools
+
+    async def disconnect_all(self):
+        """Disconnect from all MCP servers"""
+        for server_name, session in self.active_sessions.items():
+            try:
+                await session.close()
+                st.info(f"Disconnected from {server_name}")
+            except Exception as e:
+                st.warning(f"⚠️ Error disconnecting from {server_name}: {e}")
+
+        self.active_sessions.clear()
+        self.toolkits.clear()
+
+    def is_server_available(self, server_name: str) -> bool:
+        """Check if a server is available and responding"""
+        server_config = self.config.get_server_config(server_name)
+        if not server_config:
+            return False
+
+        try:
+            # Try to run the command to see if it's available
+            result = subprocess.run(
+                [server_config["command"], "--help"],
+                capture_output=True,
+                timeout=5
+            )
+            return result.returncode == 0
+        except Exception:
+            return False
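A sketch of the intended call sequence for `MCPClientManager` (async connect, then synchronous tool listing). Note the module calls `st.*` status functions, so outside a running Streamlit app those become no-op warnings:

```python
# Sketch: connect to all enabled MCP servers and collect their tools.
import asyncio
from src.langgraphagenticai.mcp.mcp_client import MCPClientManager

async def load_mcp_tools():
    manager = MCPClientManager()         # reads .kiro/settings/mcp.json configs
    await manager.connect_all_servers()  # skips servers marked "disabled"
    tools = manager.get_all_tools()      # filters out "disabledTools" entries
    await manager.disconnect_all()
    return tools

tools = asyncio.run(load_mcp_tools())
```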
src/langgraphagenticai/mcp/mcp_config.py
ADDED
@@ -0,0 +1,85 @@
+"""
+MCP Configuration Management
+Handles loading and managing MCP server configurations
+"""
+
+import json
+import os
+from typing import Dict, List, Any, Optional
+import streamlit as st
+
+
+class MCPConfig:
+    def __init__(self):
+        self.workspace_config_path = ".kiro/settings/mcp.json"
+        self.user_config_path = os.path.expanduser("~/.kiro/settings/mcp.json")
+        self.config = self._load_config()
+
+    def _load_config(self) -> Dict[str, Any]:
+        """Load MCP configuration from workspace and user level configs"""
+        config = {"mcpServers": {}}
+
+        # Load user-level config first
+        user_config = self._load_config_file(self.user_config_path)
+        if user_config and "mcpServers" in user_config:
+            config["mcpServers"].update(user_config["mcpServers"])
+
+        # Load workspace-level config (takes precedence)
+        workspace_config = self._load_config_file(self.workspace_config_path)
+        if workspace_config and "mcpServers" in workspace_config:
+            config["mcpServers"].update(workspace_config["mcpServers"])
+
+        return config
+
+    def _load_config_file(self, file_path: str) -> Optional[Dict[str, Any]]:
+        """Load a single MCP config file"""
+        try:
+            if os.path.exists(file_path):
+                with open(file_path, "r") as f:
+                    return json.load(f)
+        except Exception as e:
+            st.warning(f"Error loading MCP config from {file_path}: {e}")
+        return None
+
+    def get_enabled_servers(self) -> Dict[str, Dict[str, Any]]:
+        """Get all enabled MCP servers"""
+        enabled_servers = {}
+        for server_name, server_config in self.config.get("mcpServers", {}).items():
+            if not server_config.get("disabled", False):
+                enabled_servers[server_name] = server_config
+        return enabled_servers
+
+    def get_server_config(self, server_name: str) -> Optional[Dict[str, Any]]:
+        """Get configuration for a specific server"""
+        return self.config.get("mcpServers", {}).get(server_name)
+
+    def create_default_config(self):
+        """Create a default MCP configuration file"""
+        default_config = {
+            "mcpServers": {
+                "filesystem": {
+                    "command": "uvx",
+                    "args": ["mcp-server-filesystem", "/tmp"],
+                    "disabled": False,
+                    "autoApprove": [],
+                    "disabledTools": [],
+                },
+                "brave-search": {
+                    "command": "uvx",
+                    "args": ["mcp-server-brave-search"],
+                    "env": {"BRAVE_API_KEY": "your_brave_api_key_here"},
+                    "disabled": True,
+                    "autoApprove": [],
+                    "disabledTools": [],
+                },
+            }
+        }
+
+        # Create directory if it doesn't exist
+        os.makedirs(os.path.dirname(self.workspace_config_path), exist_ok=True)
+
+        # Write default config
+        with open(self.workspace_config_path, "w") as f:
+            json.dump(default_config, f, indent=2)
+
+        return default_config
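Usage sketch: the workspace config overrides the user config because it is merged second; both files share the shape produced by `create_default_config`:

```python
# Sketch: inspect the merged MCP configuration.
from src.langgraphagenticai.mcp.mcp_config import MCPConfig

config = MCPConfig()  # merges ~/.kiro/settings/mcp.json, then .kiro/settings/mcp.json

for name, server in config.get_enabled_servers().items():
    print(name, server["command"], server.get("args", []))

# Bootstrap a workspace config if none exists yet:
# config.create_default_config()
```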
src/langgraphagenticai/monitoring/__init__.py
ADDED
@@ -0,0 +1 @@
+# Monitoring and Analytics Module
src/langgraphagenticai/monitoring/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (147 Bytes)

src/langgraphagenticai/monitoring/__pycache__/dashboard.cpython-312.pyc
ADDED
Binary file (3.57 kB)

src/langgraphagenticai/monitoring/__pycache__/langfuse_integration.cpython-312.pyc
ADDED
Binary file (5.73 kB)
src/langgraphagenticai/monitoring/dashboard.py
ADDED
@@ -0,0 +1,96 @@
+"""
+Monitoring Dashboard Component
+Shows LLM usage statistics and costs in Streamlit
+"""
+import streamlit as st
+import os
+from typing import Dict, Any, List
+from datetime import datetime, timedelta
+from .langfuse_integration import langfuse_manager
+
+
+def show_monitoring_dashboard():
+    """Display monitoring dashboard in Streamlit sidebar"""
+    try:
+        if not langfuse_manager.is_enabled():
+            return
+
+        with st.sidebar:
+            st.subheader("LLM Monitoring")
+
+            # Dashboard link
+            dashboard_url = langfuse_manager.get_dashboard_url()
+            st.markdown(f"[Open Langfuse Dashboard]({dashboard_url})")
+
+            # Quick stats (if available)
+            try:
+                # Note: This would require additional Langfuse API calls
+                # For now, we'll show basic info
+                st.metric("Status", "Active")
+                st.metric("Session", st.session_state.get("session_id", "N/A"))
+
+            except Exception:
+                # Silently fail if stats can't be loaded
+                pass
+
+    except Exception:
+        # Dashboard should never break the main app
+        pass
+
+
+def create_session_id() -> str:
+    """Create a unique session ID for tracking"""
+    if "session_id" not in st.session_state:
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        st.session_state.session_id = f"session_{timestamp}"
+
+    return st.session_state.session_id
+
+
+def log_user_interaction(usecase: str, user_message: str, response: str):
+    """Log user interaction for monitoring - fails silently if monitoring unavailable"""
+    try:
+        if not langfuse_manager.is_enabled():
+            return
+
+        session_id = create_session_id()
+
+        # Create trace for this interaction
+        trace = langfuse_manager.create_trace(
+            name=f"chat_interaction_{usecase}",
+            session_id=session_id,
+            user_id="streamlit_user",
+            metadata={
+                "usecase": usecase,
+                "timestamp": datetime.now().isoformat()
+            }
+        )
+
+        if trace:
+            # Log the generation
+            langfuse_manager.log_generation(
+                trace_id=trace.id,
+                model="groq_llm",
+                input_text=user_message,
+                output_text=response,
+                metadata={
+                    "usecase": usecase,
+                    "session_id": session_id
+                }
+            )
+
+    except Exception:
+        # Silently fail - logging should never break the app
+        pass
+
+
+def show_cost_tracking():
+    """Show cost tracking information"""
+    try:
+        if not langfuse_manager.is_enabled():
+            return
+
+        st.info("Cost tracking is enabled via Langfuse. View detailed costs in the dashboard.")
+    except Exception:
+        # Silently fail if cost tracking info can't be shown
+        pass
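These helpers are meant to be called from the Streamlit entry point; a sketch of the wiring (the call sites in this commit live in `loadui.py` and `display_result.py`):

```python
# Sketch: wire the monitoring helpers into a Streamlit page.
from src.langgraphagenticai.monitoring.dashboard import (
    log_user_interaction,
    show_monitoring_dashboard,
)

show_monitoring_dashboard()  # renders sidebar status; no-op when Langfuse is off

# After producing a response for the user:
log_user_interaction(
    usecase="Basic Chatbot",
    user_message="Hello",
    response="Hi there!",
)  # silently does nothing if monitoring is unavailable
```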
src/langgraphagenticai/monitoring/langfuse_integration.py
ADDED
@@ -0,0 +1,146 @@
+"""
+Langfuse Integration for LLM Monitoring and Cost Tracking
+"""
+import os
+from typing import Optional, Dict, Any
+import streamlit as st
+from langfuse import Langfuse
+from langfuse.langchain import CallbackHandler
+from langchain.callbacks.base import BaseCallbackHandler
+
+
+class LangfuseManager:
+    """Manages Langfuse connection and monitoring"""
+
+    def __init__(self):
+        self.langfuse = None
+        self.callback_handler = None
+        self._initialize_langfuse()
+
+    def _initialize_langfuse(self):
+        """Initialize Langfuse client with graceful fallback"""
+        try:
+            secret_key = os.getenv("LANGFUSE_SECRET_KEY")
+            public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
+            host = os.getenv("LANGFUSE_HOST", "http://localhost:3000")
+
+            if not secret_key or not public_key:
+                # Silently disable monitoring if keys are not provided
+                return
+
+            # Initialize Langfuse with timeout
+            self.langfuse = Langfuse(
+                secret_key=secret_key,
+                public_key=public_key,
+                host=host,
+                timeout=5  # 5 second timeout
+            )
+
+            # Create callback handler for LangChain integration
+            self.callback_handler = CallbackHandler(
+                secret_key=secret_key,
+                public_key=public_key,
+                host=host,
+                timeout=5
+            )
+
+            # Test connection with timeout
+            try:
+                self.langfuse.auth_check()
+                # Only show success message if in development mode
+                if os.getenv("STREAMLIT_ENV") != "production":
+                    st.success("✅ Langfuse monitoring enabled")
+            except Exception as auth_error:
+                # Connection test failed, disable monitoring
+                self.langfuse = None
+                self.callback_handler = None
+                if os.getenv("STREAMLIT_ENV") != "production":
+                    st.warning(f"⚠️ Langfuse connection failed: {auth_error}. Monitoring disabled.")
+
+        except Exception as e:
+            # Any initialization error should not break the app
+            self.langfuse = None
+            self.callback_handler = None
+            # Only log in development mode
+            if os.getenv("STREAMLIT_ENV") != "production":
+                st.info("ℹ️ Langfuse monitoring not available")
+
+    def get_callback_handler(self) -> Optional[BaseCallbackHandler]:
+        """Get the Langfuse callback handler for LangChain"""
+        return self.callback_handler
+
+    def is_enabled(self) -> bool:
+        """Check if Langfuse monitoring is enabled"""
+        return self.langfuse is not None and self.callback_handler is not None
+
+    def create_trace(self, name: str, user_id: Optional[str] = None,
+                     session_id: Optional[str] = None, **kwargs) -> Optional[Any]:
+        """Create a new trace for monitoring"""
+        if not self.langfuse:
+            return None
+
+        try:
+            return self.langfuse.trace(
+                name=name,
+                user_id=user_id,
+                session_id=session_id,
+                **kwargs
+            )
+        except Exception:
+            # Silently fail - monitoring should never break the app
+            return None
+
+    def log_generation(self, trace_id: str, model: str, input_text: str,
+                       output_text: str, **kwargs):
+        """Log a generation event"""
+        if not self.langfuse:
+            return
+
+        try:
+            self.langfuse.generation(
+                trace_id=trace_id,
+                name="llm_generation",
+                model=model,
+                input=input_text,
+                output=output_text,
+                **kwargs
+            )
+        except Exception:
+            # Silently fail - monitoring should never break the app
+            pass
+
+    def get_dashboard_url(self) -> str:
+        """Get the Langfuse dashboard URL"""
+        host = os.getenv("LANGFUSE_HOST", "http://localhost:3000")
+        return f"{host}/project/default"
+
+
+# Global instance
+langfuse_manager = LangfuseManager()
+
+
+def get_langfuse_callbacks():
+    """Get Langfuse callbacks for LangChain integration"""
+    if langfuse_manager.is_enabled():
+        return [langfuse_manager.get_callback_handler()]
+    return []
+
+
+def create_monitored_llm(llm, session_id: Optional[str] = None):
+    """Wrap an LLM with Langfuse monitoring - gracefully falls back if monitoring unavailable"""
+    try:
+        if not langfuse_manager.is_enabled():
+            return llm
+
+        # Add Langfuse callback to the LLM
+        callbacks = get_langfuse_callbacks()
+        if callbacks:
+            # Create a new LLM instance with callbacks
+            llm_with_callbacks = llm.__class__(**llm.__dict__)
+            llm_with_callbacks.callbacks = callbacks
+            return llm_with_callbacks
+
+        return llm
+    except Exception:
+        # If anything goes wrong with monitoring, return original LLM
+        return llm
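Besides mutating the LLM object, the handler can also be passed per invocation; a sketch, assuming a LangChain runnable that accepts a `config` dict (standard LangChain behavior, with `chain` as a hypothetical placeholder):

```python
# Sketch: pass the Langfuse handler per call instead of mutating the LLM.
from src.langgraphagenticai.monitoring.langfuse_integration import get_langfuse_callbacks

callbacks = get_langfuse_callbacks()  # [] when keys are missing or auth_check failed

# `chain` is any LangChain runnable (hypothetical placeholder).
# result = chain.invoke({"input": "hello"}, config={"callbacks": callbacks})
```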
src/langgraphagenticai/nodes/__pycache__/mcp_chatbot_node.cpython-312.pyc
ADDED
Binary file (1.52 kB)
src/langgraphagenticai/nodes/mcp_chatbot_node.py
ADDED
@@ -0,0 +1,28 @@
+"""
+MCP Chatbot Node
+Handles chatbot logic with MCP tools integration
+"""
+from src.langgraphagenticai.state.state import State
+from typing import List, Any
+
+
+class MCPChatbotNode:
+    """
+    Chatbot logic enhanced with MCP tools integration
+    """
+    def __init__(self, model):
+        self.llm = model
+
+    def create_chatbot(self, tools: List[Any]):
+        """
+        Returns a chatbot node function with MCP tools
+        """
+        llm_with_tools = self.llm.bind_tools(tools)
+
+        def chatbot_node(state: State):
+            """
+            Chatbot logic for processing the input state and returning a response
+            """
+            return {"messages": [llm_with_tools.invoke(state["messages"])]}
+
+        return chatbot_node
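The factory returns a plain function over the graph state; a sketch of its shape, assuming a model that supports `bind_tools` (as ChatGroq does):

```python
# Sketch: the returned node maps state -> {"messages": [AIMessage]}.
import json
import os
from langchain_core.messages import HumanMessage
from langchain_groq import ChatGroq
from src.langgraphagenticai.nodes.mcp_chatbot_node import MCPChatbotNode
from src.langgraphagenticai.tools.mcp_tools import (
    create_mcp_tools_from_config,
    get_sample_mcp_config,
)

llm = ChatGroq(api_key=os.getenv("GROQ_API_KEY"), model="llama-3.3-70b-versatile")
mcp_tools = create_mcp_tools_from_config(json.loads(get_sample_mcp_config()))

node = MCPChatbotNode(llm).create_chatbot(tools=mcp_tools)
out = node({"messages": [HumanMessage(content="List the files in /tmp")]})
# out["messages"][-1] may carry tool_calls, which tools_condition
# routes to the "tools" node in the graph.
```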
src/langgraphagenticai/tools/__pycache__/mcp_tools.cpython-312.pyc
ADDED
Binary file (5.25 kB)
src/langgraphagenticai/tools/mcp_tools.py
ADDED
@@ -0,0 +1,141 @@
+"""
+MCP Tools Integration
+Handles MCP server connections and tool creation for LangGraph
+"""
+import json
+import asyncio
+import subprocess
+from typing import Dict, List, Any, Optional
+import streamlit as st
+from langchain.tools import BaseTool
+from pydantic import BaseModel, Field
+import tempfile
+import os
+
+
+class MCPTool(BaseTool):
+    """A LangChain tool that wraps MCP functionality"""
+    name: str = Field(...)
+    description: str = Field(...)
+    mcp_command: str = Field(...)
+    mcp_args: List[str] = Field(default_factory=list)
+    mcp_env: Dict[str, str] = Field(default_factory=dict)
+
+    def _run(self, query: str) -> str:
+        """Execute the MCP tool"""
+        try:
+            # Set up environment
+            env = os.environ.copy()
+            env.update(self.mcp_env)
+
+            # Create a simple input for the MCP server
+            input_data = {
+                "method": "tools/call",
+                "params": {
+                    "name": self.name,
+                    "arguments": {"query": query}
+                }
+            }
+
+            # Run the MCP server command
+            result = subprocess.run(
+                [self.mcp_command] + self.mcp_args,
+                input=json.dumps(input_data),
+                capture_output=True,
+                text=True,
+                env=env,
+                timeout=30
+            )
+
+            if result.returncode == 0:
+                return result.stdout
+            else:
+                return f"Error: {result.stderr}"
+
+        except Exception as e:
+            return f"Error executing MCP tool: {str(e)}"
+
+
+def create_mcp_tools_from_config(mcp_config: Dict[str, Any]) -> List[MCPTool]:
+    """Create MCP tools from configuration"""
+    tools = []
+
+    try:
+        mcp_servers = mcp_config.get("mcpServers", {})
+
+        for server_name, server_config in mcp_servers.items():
+            if server_config.get("disabled", False):
+                continue
+
+            # Create a generic tool for each MCP server
+            tool = MCPTool(
+                name=f"mcp_{server_name}",
+                description=f"MCP tool for {server_name} server - can handle various queries and operations",
+                mcp_command=server_config.get("command", "uvx"),
+                mcp_args=server_config.get("args", []),
+                mcp_env=server_config.get("env", {})
+            )
+            tools.append(tool)
+
+    except Exception as e:
+        st.error(f"Error creating MCP tools: {e}")
+
+    return tools
+
+
+def validate_mcp_config(config_text: str) -> Optional[Dict[str, Any]]:
+    """Validate MCP configuration JSON"""
+    try:
+        config = json.loads(config_text)
+
+        # Basic validation
+        if "mcpServers" not in config:
+            st.error("MCP config must contain 'mcpServers' key")
+            return None
+
+        # Validate each server config
+        for server_name, server_config in config["mcpServers"].items():
+            if "command" not in server_config:
+                st.error(f"Server '{server_name}' must have 'command' field")
+                return None
+
+        return config
+
+    except json.JSONDecodeError as e:
+        st.error(f"Invalid JSON: {e}")
+        return None
+    except Exception as e:
+        st.error(f"Error validating config: {e}")
+        return None
+
+
+def get_sample_mcp_config() -> str:
+    """Get a sample MCP configuration"""
+    return json.dumps({
+        "mcpServers": {
+            "filesystem": {
+                "command": "uvx",
+                "args": ["mcp-server-filesystem", "/tmp"],
+                "disabled": False,
+                "autoApprove": [],
+                "disabledTools": []
+            },
+            "brave-search": {
+                "command": "uvx",
+                "args": ["mcp-server-brave-search"],
+                "env": {
+                    "BRAVE_API_KEY": "your_brave_api_key_here"
+                },
+                "disabled": True,
+                "autoApprove": [],
+                "disabledTools": []
+            },
+            "sqlite": {
+                "command": "uvx",
+                "args": ["mcp-server-sqlite", "--db-path", "/tmp/test.db"],
+                "disabled": False,
+                "autoApprove": [],
+                "disabledTools": []
+            }
+        }
+    }, indent=2)
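Round-trip sketch: the sample config validates and yields one `MCPTool` per enabled server (filesystem and sqlite in the sample; brave-search is disabled):

```python
# Sketch: validate the sample config and build tools from it.
from src.langgraphagenticai.tools.mcp_tools import (
    create_mcp_tools_from_config,
    get_sample_mcp_config,
    validate_mcp_config,
)

config = validate_mcp_config(get_sample_mcp_config())  # None + st.error on bad input
if config:
    tools = create_mcp_tools_from_config(config)
    print([t.name for t in tools])  # ['mcp_filesystem', 'mcp_sqlite']
```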
src/langgraphagenticai/ui/streamlitui/__pycache__/display_result.cpython-312.pyc
CHANGED
Binary files a/src/langgraphagenticai/ui/streamlitui/__pycache__/display_result.cpython-312.pyc and b/src/langgraphagenticai/ui/streamlitui/__pycache__/display_result.cpython-312.pyc differ

src/langgraphagenticai/ui/streamlitui/__pycache__/loadui.cpython-312.pyc
CHANGED
Binary files a/src/langgraphagenticai/ui/streamlitui/__pycache__/loadui.cpython-312.pyc and b/src/langgraphagenticai/ui/streamlitui/__pycache__/loadui.cpython-312.pyc differ
src/langgraphagenticai/ui/streamlitui/display_result.py
CHANGED
@@ -1,6 +1,7 @@
 import streamlit as st
 from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
 import json
+from src.langgraphagenticai.monitoring.dashboard import log_user_interaction, show_cost_tracking
 
 class DisplayResultStremlit:
     def __init__(self, usecase, graph, user_message):
@@ -21,8 +22,18 @@ class DisplayResultStremlit:
                 st.write(user_message)
             with st.chat_message("assisstant"):
                 st.write(value["messages"].content)
+
+            # Log interaction for monitoring - fail silently if monitoring unavailable
+            try:
+                log_user_interaction(
+                    usecase=usecase,
+                    user_message=user_message,
+                    response=value["messages"].content
+                )
+            except Exception:
+                pass
 
-        elif usecase == "Chatbot with Tool" or usecase == "AI News":
+        elif usecase == "Chatbot with Tool" or usecase == "AI News" or usecase == "MCP Chatbot":
             # Prepare state and invoke the graph
             initial_state = {"messages": [HumanMessage(content=user_message)]}
 
@@ -41,15 +52,38 @@ class DisplayResultStremlit:
             for message in messages:
                 if isinstance(message, ToolMessage):
                     with st.chat_message("assistant"):
-                        st.write("**Tool Search Results:**")
+                        if usecase == "MCP Chatbot":
+                            st.write("**MCP Tool Results:**")
+                        else:
+                            st.write("**Tool Search Results:**")
                         st.write(message.content)
                 elif isinstance(message, AIMessage):
                     if message.tool_calls:
                         with st.chat_message("assistant"):
-                            st.write("**Calling search tool...**")
+                            if usecase == "MCP Chatbot":
+                                st.write("**Calling MCP tool...**")
+                            else:
+                                st.write("**Calling search tool...**")
                             for tool_call in message.tool_calls:
-                                st.write(f"
+                                st.write(f"Tool: {tool_call.get('name', 'Unknown')}")
+                                st.write(f"Query: {tool_call['args'].get('query', 'N/A')}")
                     else:
                         with st.chat_message("assistant"):
                             st.write(message.content)
+
+            # Log interaction for monitoring - fail silently if monitoring unavailable
+            try:
+                log_user_interaction(
+                    usecase=usecase,
+                    user_message=user_message,
+                    response=message.content
+                )
+            except Exception:
+                pass
+
+            # Show cost tracking info - fail silently if monitoring unavailable
+            try:
+                show_cost_tracking()
+            except Exception:
+                pass
 
src/langgraphagenticai/ui/streamlitui/loadui.py
CHANGED
@@ -42,6 +42,21 @@ class LoadStreamlitUI:
 
 
         with st.sidebar:
+            # Langfuse monitoring status - fail silently if monitoring unavailable
+            try:
+                from src.langgraphagenticai.monitoring.langfuse_integration import langfuse_manager
+                if langfuse_manager.is_enabled():
+                    st.success("Langfuse Monitoring: ON")
+                    dashboard_url = langfuse_manager.get_dashboard_url()
+                    st.markdown(f"[View Dashboard]({dashboard_url})")
+                else:
+                    st.info("Langfuse Monitoring: OFF")
+            except Exception:
+                # If monitoring status can't be determined, don't show anything
+                pass
+
+            st.divider()
+
             # Get options from config
             llm_options = self.config.get_llm_options()
             usecase_options = self.config.get_usecase_options()
@@ -88,6 +103,46 @@ class LoadStreamlitUI:
                 else :
                     st.session_state.IsFetchButtonClicked = False
 
+            elif self.user_controls["selected_usecase"] == "MCP Chatbot":
+                st.subheader("MCP Configuration")
+
+                # Sample config button
+                if st.button("Load Sample Config"):
+                    from src.langgraphagenticai.tools.mcp_tools import get_sample_mcp_config
+                    st.session_state.mcp_config_text = get_sample_mcp_config()
+
+                # MCP Config input
+                mcp_config_text = st.text_area(
+                    "MCP Configuration JSON:",
+                    value=st.session_state.get("mcp_config_text", ""),
+                    height=300,
+                    help="Paste your MCP server configuration JSON here"
+                )
+
+                if mcp_config_text:
+                    st.session_state.mcp_config_text = mcp_config_text
+
+                    # Validate config
+                    from src.langgraphagenticai.tools.mcp_tools import validate_mcp_config
+                    validated_config = validate_mcp_config(mcp_config_text)
+
+                    if validated_config:
+                        self.user_controls["mcp_config"] = validated_config
+                        st.success("✅ MCP configuration is valid!")
+
+                        # Show configured servers
+                        servers = validated_config.get("mcpServers", {})
+                        enabled_servers = [name for name, config in servers.items() if not config.get("disabled", False)]
+
+                        if enabled_servers:
+                            st.info(f"Enabled MCP servers: {', '.join(enabled_servers)}")
+                        else:
+                            st.warning("⚠️ No enabled MCP servers found in configuration")
+                    else:
+                        st.error("❌ Invalid MCP configuration")
+                else:
+                    st.info("Please provide MCP configuration to use MCP tools")
+
         if "state" not in st.session_state:
             st.session_state.state = self.initialize_session()
 
src/langgraphagenticai/ui/uiconfigfile.ini
CHANGED
@@ -1,5 +1,5 @@
 [DEFAULT]
 PAGE_TITLE = Langgraph: Build Stateful Agentic AI graph
 LLM_OPTIONS = Groq
-USECASE_OPTIONS = Basic Chatbot, Chatbot with Tool, AI News
+USECASE_OPTIONS = Basic Chatbot, Chatbot with Tool, AI News, MCP Chatbot
 GROQ_MODEL_OPTIONS = openai/gpt-oss-120b, meta-llama/llama-guard-4-12b, llama-3.3-70b-versatile, openai/gpt-oss-20b
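The options are comma-separated strings under `[DEFAULT]`; a sketch of reading them with the standard-library `configparser` (the repo's `Config` wrapper is not shown in this diff, so how it parses is an assumption):

```python
# Sketch: parse the use-case list the way a configparser-based wrapper would.
from configparser import ConfigParser

parser = ConfigParser()
parser.read("src/langgraphagenticai/ui/uiconfigfile.ini")

usecases = [u.strip() for u in parser["DEFAULT"]["USECASE_OPTIONS"].split(",")]
print(usecases)  # ['Basic Chatbot', 'Chatbot with Tool', 'AI News', 'MCP Chatbot']
```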