Guilherme34 commited on
Commit
aa15bce
·
verified ·
1 Parent(s): e3e9a8b

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +55 -0
  2. .env.example +29 -0
  3. .gitignore +46 -0
  4. Dockerfile +41 -0
  5. Dockerfile.web +38 -0
  6. LICENSE +21 -0
  7. README.md +122 -10
  8. deploy.sh +69 -0
  9. docker-compose.production.yml +79 -0
  10. docker-compose.yml +93 -0
  11. next-env.d.ts +5 -0
  12. server/__init__.py +3 -0
  13. server/agents/__init__.py +8 -0
  14. server/agents/execution_agent/__init__.py +16 -0
  15. server/agents/execution_agent/agent.py +123 -0
  16. server/agents/execution_agent/batch_manager.py +193 -0
  17. server/agents/execution_agent/runtime.py +236 -0
  18. server/agents/execution_agent/system_prompt.md +51 -0
  19. server/agents/execution_agent/tasks/__init__.py +30 -0
  20. server/agents/execution_agent/tasks/search_email/__init__.py +15 -0
  21. server/agents/execution_agent/tasks/search_email/email_cleaner.py +5 -0
  22. server/agents/execution_agent/tasks/search_email/gmail_internal.py +79 -0
  23. server/agents/execution_agent/tasks/search_email/schemas.py +122 -0
  24. server/agents/execution_agent/tasks/search_email/system_prompt.py +83 -0
  25. server/agents/execution_agent/tasks/search_email/tool.py +450 -0
  26. server/agents/execution_agent/tools/__init__.py +10 -0
  27. server/agents/execution_agent/tools/gmail.py +548 -0
  28. server/agents/execution_agent/tools/registry.py +36 -0
  29. server/agents/execution_agent/tools/triggers.py +249 -0
  30. server/agents/interaction_agent/__init__.py +18 -0
  31. server/agents/interaction_agent/agent.py +65 -0
  32. server/agents/interaction_agent/runtime.py +404 -0
  33. server/agents/interaction_agent/system_prompt.md +143 -0
  34. server/agents/interaction_agent/tools.py +245 -0
  35. server/app.py +86 -0
  36. server/config.py +96 -0
  37. server/logging_config.py +20 -0
  38. server/models/__init__.py +17 -0
  39. server/models/chat.py +43 -0
  40. server/models/gmail.py +27 -0
  41. server/models/meta.py +27 -0
  42. server/openrouter_client/__init__.py +3 -0
  43. server/openrouter_client/client.py +92 -0
  44. server/requirements.txt +7 -0
  45. server/routes/__init__.py +14 -0
  46. server/routes/chat.py +48 -0
  47. server/routes/gmail.py +28 -0
  48. server/routes/meta.py +54 -0
  49. server/server.py +53 -0
  50. server/services/__init__.py +52 -0
.dockerignore ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environment files with sensitive data
2
+ .env
3
+ .env.*
4
+ !.env.example
5
+
6
+ # Git
7
+ .git
8
+ .gitignore
9
+
10
+ # Documentation
11
+ README.md
12
+ *.md
13
+
14
+ # Docker files
15
+ Dockerfile*
16
+ docker-compose*.yml
17
+ .dockerignore
18
+
19
+ # Development files
20
+ .vscode/
21
+ .idea/
22
+
23
+ # OS generated files
24
+ .DS_Store
25
+ .DS_Store?
26
+ ._*
27
+ .Spotlight-V100
28
+ .Trashes
29
+ ehthumbs.db
30
+ Thumbs.db
31
+
32
+ # Logs
33
+ *.log
34
+ logs/
35
+
36
+ # Runtime data that shouldn't be in image
37
+ server/data/
38
+ web/.next/
39
+
40
+ # Dependencies that are installed in container
41
+ node_modules/
42
+ __pycache__/
43
+ *.pyc
44
+ *.pyo
45
+ *.pyd
46
+ .Python
47
+
48
+ # Testing
49
+ coverage/
50
+ .pytest_cache/
51
+ .coverage
52
+
53
+ # Temporary files
54
+ tmp/
55
+ temp/
.env.example ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # API Configuration
2
+ API_BASE_URL=https://api.friendli.ai/dedicated/v1
3
+ API_KEY=your_api_key_here
4
+
5
+ # Composio Configuration
6
+ COMPOSIO_API_KEY=your_composio_api_key_here
7
+
8
+ # Model Configuration
9
+ INTERACTION_AGENT_MODEL=your_interaction_model_here
10
+ EXECUTION_AGENT_MODEL=your_execution_model_here
11
+ EXECUTION_SEARCH_AGENT_MODEL=your_search_model_here
12
+ SUMMARIZER_MODEL=your_summarizer_model_here
13
+ EMAIL_CLASSIFIER_MODEL=your_classifier_model_here
14
+
15
+ # Application Configuration
16
+ OPENPOKE_HOST=0.0.0.0
17
+ OPENPOKE_PORT=8001
18
+ OPENPOKE_CORS_ALLOW_ORIGINS=*
19
+ OPENPOKE_ENABLE_DOCS=1
20
+
21
+ # Web Application Configuration
22
+ NEXT_PUBLIC_API_URL=http://localhost:8001
23
+
24
+ # Instructions:
25
+ # 1. Copy this file to .env: cp .env.example .env
26
+ # 2. Replace all placeholder values with your actual credentials
27
+ # 3. Never commit the .env file to version control
28
+ # 4. Add .env to your .gitignore file
29
+ # 5. Use docker-compose --env-file .env up for production deployments
.gitignore ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Node / Next.js
2
+ node_modules/
3
+ .next/
4
+ web/node_modules/
5
+ web/.next/
6
+
7
+ # Logs
8
+ npm-debug.log*
9
+ yarn-debug.log*
10
+ yarn-error.log*
11
+ *.log
12
+ .server.log
13
+ .server.pid
14
+
15
+ # Envs
16
+ .env
17
+ .env.local
18
+ .env.*.local
19
+
20
+ # OS
21
+ .DS_Store
22
+
23
+ # Python
24
+ .venv/
25
+ __pycache__/
26
+ *.pyc
27
+
28
+ # Database files
29
+ *.db
30
+ *.db-shm
31
+ *.db-wal
32
+
33
+ # Data folder
34
+ server/data/
35
+ data/
36
+
37
+ # Python virtual environment
38
+ server/venv/
39
+
40
+ # Build metadata
41
+ *.tsbuildinfo
42
+ /package-lock.json
43
+
44
+ # Generated documentation / analysis artifacts
45
+ server/repomix-output.txt
46
+ server/plans/
Dockerfile ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use Python 3.11 slim image for smaller size
FROM python:3.11-slim

# Create a non-root user for security
RUN groupadd -r appuser && useradd -r -g appuser appuser

# Set working directory
WORKDIR /app

# Install system dependencies.
# curl is required by the HEALTHCHECK below — the slim base image does not
# ship it, and without this the health check fails on every probe, leaving
# the container permanently "unhealthy".
RUN apt-get update && apt-get install -y --no-install-recommends \
        gcc \
        curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better Docker layer caching
COPY server/requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy server code
COPY server/ ./server/

# Create necessary directories and set permissions
RUN mkdir -p /app/logs && \
    chown -R appuser:appuser /app

# Switch to non-root user
USER appuser

# Expose port
EXPOSE 8001

# Add health check (curl installed above)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8001/health || exit 1

# Start the server with proper configuration for graceful shutdown
CMD ["uvicorn", "server.server:app", "--host", "0.0.0.0", "--port", "8001", "--access-log"]
Dockerfile.web ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use Node.js 18 LTS Alpine for smaller size
FROM node:18-alpine

# Create a non-root user for security
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nextjs -u 1001

# Set working directory
WORKDIR /app

# Copy package files first for better layer caching
COPY web/package*.json ./

# Install ALL dependencies: `next build` needs devDependencies (TypeScript,
# type packages — the repo ships next-env.d.ts), so a production-only install
# would make the build step below fail.
RUN npm ci && npm cache clean --force

# Copy web code
COPY web/ .

# Build the application
RUN npm run build

# Drop devDependencies now that the build artifacts exist, keeping the image lean
RUN npm prune --omit=dev

# Create necessary directories and set permissions
RUN mkdir -p /app/.next/cache && \
    chown -R nextjs:nodejs /app

# Switch to non-root user
USER nextjs

# Expose port
EXPOSE 3000

# Add health check (wget is provided by busybox in Alpine)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:3000 || exit 1

# Start the application with proper configuration
CMD ["npm", "start"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 OpenPoke Contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -1,10 +1,122 @@
1
- ---
2
- title: Guilherme34 Openpokespace
3
- emoji: 📈
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # OpenPoke 🌴
2
+
3
+
4
+ OpenPoke is a simplified, open-source take on [Interaction Company’s](https://interaction.co/about) [Poke](https://poke.com/) assistant—built to show how a multi-agent orchestration stack can feel genuinely useful. It keeps the handful of things Poke is great at (email triage, reminders, and persistent agents) while staying easy to spin up locally.
5
+
6
+ - Multi-agent FastAPI backend that mirrors Poke's interaction/execution split, powered by OpenAI-compatible APIs.
7
+ - Gmail tooling via [Composio](https://composio.dev/) for drafting/replying/forwarding without leaving chat.
8
+ - Trigger scheduler and background watchers for reminders and "important email" alerts.
9
+ - Next.js web UI that proxies everything through the shared `.env`, so plugging in API keys is the only setup.
10
+
11
+ ## Requirements
12
+ - Python 3.10+
13
+ - Node.js 18+
14
+ - npm 9+
15
+
16
+ ## Quickstart
17
+ 1. **Clone and enter the repo.**
18
+ ```bash
19
+ git clone https://github.com/shlokkhemani/OpenPoke
20
+ cd OpenPoke
21
+ ```
22
+ 2. **Create a shared env file.** Copy the template and open it in your editor:
23
+ ```bash
24
+ cp .env.example .env
25
+ ```
26
+ 3. **Get your API keys and add them to `.env`:**
27
+
28
+ **API Configuration (Required)**
29
+ - Configure your OpenAI-compatible API endpoint and API key in `.env`
30
+ - Set `API_BASE_URL` to your API endpoint (e.g., `https://api.friendli.ai/dedicated/v1`)
31
+ - Set `API_KEY` to your API key
32
+ - All agent models can be configured via environment variables
33
+
34
+ **Composio (Required for Gmail)**
35
+ - Sign in at [composio.dev](https://composio.dev/)
36
+ - Create an API key
37
+ - Set up Gmail integration and get your auth config ID
38
+ - Replace `your_composio_api_key_here` in `.env` (and, if your setup uses a Gmail auth config ID, add that value too — it is not part of `.env.example`)
39
+
40
+ ## 🚀 Quick Start (Docker - Recommended)
41
+
42
+ If you have Docker and docker-compose installed, you can get started immediately:
43
+
44
+ ```bash
45
+ # Deploy with one command (includes security setup)
46
+ ./deploy.sh
47
+
48
+ # Or manually with environment variables
49
+ docker-compose --env-file .env up --build -d
50
+ ```
51
+
52
+ This will start both the API server (port 8001) and web UI (port 3000).
53
+
54
+ ### Production Deployment
55
+
56
+ For production deployments:
57
+
58
+ ```bash
59
+ # Use production environment file
60
+ docker-compose --env-file .env.production up --build -d
61
+
62
+ # Or use specific environment file
63
+ docker-compose --env-file .env.staging up --build -d
64
+ ```
65
+
66
+ ### Docker Features
67
+
68
+ - **Security**: Non-root containers with proper user isolation
69
+ - **Health Checks**: Built-in monitoring for service availability
70
+ - **Resource Limits**: CPU and memory constraints for stable performance
71
+ - **Logging**: Structured JSON logging with rotation
72
+ - **Networks**: Isolated network for service communication
73
+ - **Volumes**: Persistent storage for logs and runtime data
74
+
75
+ ## 🛠️ Manual Setup (Alternative)
76
+
77
+ 4. **(Required) Create and activate a Python 3.10+ virtualenv:**
78
+ ```bash
79
+ # Ensure you're using Python 3.10+
80
+ python3.10 -m venv .venv
81
+ source .venv/bin/activate
82
+
83
+ # Verify Python version (should show 3.10+)
84
+ python --version
85
+ ```
86
+ On Windows (PowerShell):
87
+ ```powershell
88
+ # Use Python 3.10+ (adjust path as needed)
89
+ python3.10 -m venv .venv
90
+ .\.venv\Scripts\Activate.ps1
91
+
92
+ # Verify Python version
93
+ python --version
94
+ ```
95
+
96
+ 5. **Install backend dependencies:**
97
+ ```bash
98
+ pip install -r server/requirements.txt
99
+ ```
100
+ 6. **Install frontend dependencies:**
101
+ ```bash
102
+ npm install --prefix web
103
+ ```
104
+ 7. **Start the FastAPI server:**
105
+ ```bash
106
+ python -m server.server --reload
107
+ ```
108
+ 8. **Start the Next.js app (new terminal):**
109
+ ```bash
110
+ npm run dev --prefix web
111
+ ```
112
+ 9. **Connect Gmail for email workflows.** With both services running, open [http://localhost:3000](http://localhost:3000), head to *Settings → Gmail*, and complete the Composio OAuth flow. This step is required for email drafting, replies, and the important-email monitor.
113
+
114
+ The web app proxies API calls to the Python server using the values in `.env`, so keeping both processes running is required for end-to-end flows.
115
+
116
+ ## Project Layout
117
+ - `server/` – FastAPI application and agents
118
+ - `web/` – Next.js app
119
+ - `server/data/` – runtime data (ignored by git)
120
+
121
+ ## License
122
+ MIT — see [LICENSE](LICENSE).
deploy.sh ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash

# OpenPoke Deployment Script
echo "🚀 Deploying OpenPoke with FriendliAI integration..."

# Check if .env file exists
if [ ! -f ".env" ]; then
    echo "⚠️ Warning: .env file not found!"
    echo "📝 Please copy .env.example to .env and configure your API keys:"
    echo "   cp .env.example .env"
    echo "   # Then edit .env with your actual credentials"
    echo ""
    echo "🔄 Continuing with default configuration (you may need to configure API keys manually)..."
fi

# Check if Docker is installed
if ! command -v docker &> /dev/null; then
    echo "❌ Docker is not installed. Please install Docker first."
    exit 1
fi

# Check if docker-compose (v1 binary or v2 plugin) is available
if command -v docker-compose &> /dev/null; then
    COMPOSE_CMD="docker-compose"
elif docker compose version &> /dev/null; then
    COMPOSE_CMD="docker compose"
else
    echo "❌ docker-compose is not installed. Please install docker-compose (or enable the Docker Compose plugin) first."
    exit 1
fi

# Stop any existing containers
echo "🛑 Stopping existing containers..."
${COMPOSE_CMD} down

# Build and start the services
echo "🔨 Building and starting services..."
if [ -f ".env" ]; then
    ${COMPOSE_CMD} --env-file .env up --build -d
else
    ${COMPOSE_CMD} up --build -d
fi

# Wait for services to be ready
echo "⏳ Waiting for services to start..."
sleep 15

# Check if services are running.
# Compose v1 reports state as "Up" while Compose v2 reports "running";
# matching only "Up" always reported failure under `docker compose`.
if ${COMPOSE_CMD} ps | grep -Eq "Up|running"; then
    echo "✅ Deployment successful!"
    echo ""
    echo "🌐 Services are running:"
    echo "   - Server API: http://localhost:8001"
    echo "   - Web UI: http://localhost:3000"
    echo ""
    echo "📖 Check the logs with: ${COMPOSE_CMD} logs -f"
    echo "🔍 View service status: ${COMPOSE_CMD} ps"
    echo "🛑 Stop with: ${COMPOSE_CMD} down"
    echo ""
    echo "💡 Tip: If you encounter API key issues, edit your .env file with correct credentials"
else
    echo "❌ Deployment failed. Check the logs with: ${COMPOSE_CMD} logs"
    echo ""
    echo "🔧 Troubleshooting tips:"
    echo "   1. Check if your .env file has valid API keys"
    echo "   2. Verify Docker has enough resources"
    echo "   3. Check firewall settings"
    exit 1
fi
docker-compose.production.yml ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: '3.8'
2
+
3
+ # Production overrides for docker-compose.yml
4
+ # Usage: docker-compose -f docker-compose.yml -f docker-compose.production.yml up
5
+
6
+ services:
7
+ server:
8
+ # Production-specific configurations
9
+ environment:
10
+ - OPENPOKE_CORS_ALLOW_ORIGINS=${PRODUCTION_CORS_ORIGINS:-https://yourdomain.com}
11
+ - OPENPOKE_ENABLE_DOCS=0 # Disable docs in production
12
+ deploy:
13
+ resources:
14
+ limits:
15
+ cpus: '1.0'
16
+ memory: 1G
17
+ reservations:
18
+ cpus: '0.5'
19
+ memory: 512M
20
+ restart_policy:
21
+ condition: on-failure
22
+ delay: 5s
23
+ max_attempts: 3
24
+ logging:
25
+ driver: "json-file"
26
+ options:
27
+ max-size: "50m"
28
+ max-file: "5"
29
+
30
+ web:
31
+ # Production-specific configurations
32
+ deploy:
33
+ resources:
34
+ limits:
35
+ cpus: '0.5'
36
+ memory: 512M
37
+ reservations:
38
+ cpus: '0.25'
39
+ memory: 256M
40
+ restart_policy:
41
+ condition: on-failure
42
+ delay: 5s
43
+ max_attempts: 3
44
+ logging:
45
+ driver: "json-file"
46
+ options:
47
+ max-size: "50m"
48
+ max-file: "5"
49
+
50
+ # Optional: Add SSL termination with Traefik
51
+ # traefik:
52
+ # image: traefik:v2.10
53
+ # command:
54
+ # - "--api.dashboard=true"
55
+ # - "--providers.docker=true"
56
+ # - "--providers.docker.exposedbydefault=false"
57
+ # - "--entrypoints.web.address=:80"
58
+ # - "--entrypoints.websecure.address=:443"
59
+ # - "--certificatesresolvers.letsencrypt.acme.httpchallenge=true"
60
+ # - "--certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=web"
61
+ # - "--certificatesresolvers.letsencrypt.acme.email=your-email@example.com"
62
+ # - "--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
63
+ # ports:
64
+ # - "80:80"
65
+ # - "443:443"
66
+ # - "8080:8080" # Traefik dashboard
67
+ # volumes:
68
+ # - /var/run/docker.sock:/var/run/docker.sock:ro
69
+ # - letsencrypt:/letsencrypt
70
+ # networks:
71
+ # - app-network
72
+ # labels:
73
+ # - "traefik.enable=true"
74
+ # - "traefik.http.routers.api.rule=Host(`traefik.yourdomain.com`)"
75
+ # - "traefik.http.routers.api.service=api@internal"
76
+
77
+ # volumes:
78
+ # letsencrypt:
79
+ # driver: local
docker-compose.yml ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
version: '3.8'

# Define networks for better service isolation
networks:
  app-network:
    driver: bridge

# Define volumes for persistent data
volumes:
  logs:
    driver: local

services:
  server:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "8001:8001"
    environment:
      - OPENPOKE_HOST=0.0.0.0
      - OPENPOKE_PORT=8001
      - OPENPOKE_CORS_ALLOW_ORIGINS=*
      - OPENPOKE_ENABLE_DOCS=${OPENPOKE_ENABLE_DOCS:-1}
      # Sensitive environment variables should be loaded from .env file
      - API_BASE_URL=${API_BASE_URL}
      - API_KEY=${API_KEY}
      - COMPOSIO_API_KEY=${COMPOSIO_API_KEY}
      - INTERACTION_AGENT_MODEL=${INTERACTION_AGENT_MODEL}
      - EXECUTION_AGENT_MODEL=${EXECUTION_AGENT_MODEL}
      - EXECUTION_SEARCH_AGENT_MODEL=${EXECUTION_SEARCH_AGENT_MODEL}
      - SUMMARIZER_MODEL=${SUMMARIZER_MODEL}
      - EMAIL_CLASSIFIER_MODEL=${EMAIL_CLASSIFIER_MODEL}
    restart: unless-stopped
    networks:
      - app-network
    volumes:
      - logs:/app/logs
      - ./server:/app/server:ro
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 256M
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  web:
    build:
      context: .
      dockerfile: Dockerfile.web
    ports:
      - "3000:3000"
    environment:
      - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL:-http://localhost:8001}
    depends_on:
      - server
    restart: unless-stopped
    networks:
      - app-network
    # NOTE: no source bind mount here. Mounting ./web over /app would hide the
    # node_modules and .next build baked into the image, so `npm start` could
    # not find the built application and the container would crash-loop.
    deploy:
      resources:
        limits:
          cpus: '0.30'
          memory: 256M
        reservations:
          cpus: '0.15'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Optional: Add a reverse proxy (nginx) for production
  # nginx:
  #   image: nginx:alpine
  #   ports:
  #     - "80:80"
  #     - "443:443"
  #   volumes:
  #     - ./nginx.conf:/etc/nginx/nginx.conf
  #   depends_on:
  #     - web
  #   restart: unless-stopped
next-env.d.ts ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ /// <reference types="next" />
2
+ /// <reference types="next/image-types/global" />
3
+
4
+ // NOTE: This file should not be edited
5
+ // see https://nextjs.org/docs/app/building-your-application/configuring/typescript for more information.
server/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ """OpenPoke Python server package."""
2
+
3
+ from .app import app
server/agents/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """Agent assets package.
2
+
3
+ Contains agent-specific prompts and tool registries that can be wired into
4
+ OpenRouter/OpenAI chat completion requests.
5
+ """
6
+
7
+ __all__ = ["interaction_agent", "execution_agent"]
8
+
server/agents/execution_agent/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Execution agent assets."""
2
+
3
+ from .agent import ExecutionAgent
4
+ from .batch_manager import ExecutionBatchManager, ExecutionResult, PendingExecution
5
+ from .runtime import ExecutionAgentRuntime
6
+ from .tools import get_tool_schemas as get_execution_tool_schemas, get_tool_registry as get_execution_tool_registry
7
+
8
+ __all__ = [
9
+ "ExecutionBatchManager",
10
+ "ExecutionAgent",
11
+ "ExecutionAgentRuntime",
12
+ "ExecutionResult",
13
+ "PendingExecution",
14
+ "get_execution_tool_schemas",
15
+ "get_execution_tool_registry",
16
+ ]
server/agents/execution_agent/agent.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Execution Agent implementation."""

from pathlib import Path
from typing import List, Optional, Dict, Any

from ...services.execution import get_execution_agent_logs
from ...logging_config import logger


# Built-in template used only when system_prompt.md is absent next to this module.
_FALLBACK_TEMPLATE = """You are an execution agent responsible for completing specific tasks using available tools.

Agent Name: {agent_name}
Purpose: {agent_purpose}

Instructions:
[TO BE FILLED IN BY USER]

You have access to Gmail tools to help complete your tasks. When given instructions:
1. Analyze what needs to be done
2. Use the appropriate tools to complete the task
3. Provide clear status updates on your actions

Be thorough, accurate, and efficient in your execution."""

# Prefer the on-disk prompt (stripped of surrounding whitespace) over the fallback.
_prompt_path = Path(__file__).parent / "system_prompt.md"
SYSTEM_PROMPT_TEMPLATE = (
    _prompt_path.read_text(encoding="utf-8").strip()
    if _prompt_path.exists()
    else _FALLBACK_TEMPLATE
)
class ExecutionAgent:
    """Manages state and history for an execution agent."""

    def __init__(
        self,
        name: str,
        conversation_limit: Optional[int] = None,
    ):
        """
        Initialize an execution agent.

        Args:
            name: Human-readable agent name (e.g., 'conversation with keith')
            conversation_limit: Optional limit on past conversations to include (None = all)
        """
        self.name = name
        self.conversation_limit = conversation_limit
        self._log_store = get_execution_agent_logs()

    def build_system_prompt(self) -> str:
        """Render the prompt template with this agent's name and a purpose derived from it."""
        return SYSTEM_PROMPT_TEMPLATE.format(
            agent_name=self.name,
            agent_purpose=f"Handle tasks related to: {self.name}",
        )

    def build_system_prompt_with_history(self) -> str:
        """
        Build the system prompt with this agent's history transcript appended.

        When a conversation limit is set, only the most recent
        `conversation_limit` request blocks (lines containing
        '<agent_request') of the transcript are kept.

        Returns:
            System prompt with embedded history transcript (or the bare
            prompt when no transcript exists).
        """
        prompt = self.build_system_prompt()
        transcript = self._log_store.load_transcript(self.name)
        if not transcript:
            return prompt

        limit = self.conversation_limit
        if limit and limit > 0:
            rows = transcript.split('\n')
            # Positions of request markers; keep everything from the
            # limit-th most recent marker onward.
            marker_rows = [i for i, row in enumerate(rows) if '<agent_request' in row]
            if len(marker_rows) > limit:
                transcript = '\n'.join(rows[marker_rows[-limit]:])

        return f"{prompt}\n\n# Execution History\n\n{transcript}"

    def build_messages_for_llm(self, current_instruction: str) -> List[Dict[str, str]]:
        """
        Build the message array for an LLM call.

        Args:
            current_instruction: Current instruction from the interaction agent.

        Returns:
            Single-element message list in OpenRouter format.
        """
        return [{"role": "user", "content": current_instruction}]

    def record_response(self, response: str) -> None:
        """Record the agent's final response in the execution log store."""
        self._log_store.record_agent_response(self.name, response)

    def record_tool_execution(self, tool_name: str, arguments: str, result: str) -> None:
        """Record a tool invocation and its (truncated) result for readability."""
        self._log_store.record_action(self.name, f"Calling {tool_name} with: {arguments[:200]}")
        self._log_store.record_tool_response(self.name, tool_name, result[:500])
server/agents/execution_agent/batch_manager.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Coordinate execution agents and batch their results for the interaction agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import uuid
7
+ from dataclasses import dataclass, field
8
+ from datetime import datetime
9
+ from typing import Dict, List, Optional
10
+
11
+ from .runtime import ExecutionAgentRuntime, ExecutionResult
12
+ from ...logging_config import logger
13
+
14
+
@dataclass
class PendingExecution:
    """Track a pending execution request."""

    # Identifier of this request (assigned by the batch manager).
    request_id: str
    # Name of the execution agent handling the request.
    agent_name: str
    # Instruction text the agent was asked to carry out.
    instructions: str
    # Batch this request belongs to.
    batch_id: str
    # Timestamp captured when the record is created.
    created_at: datetime = field(default_factory=datetime.now)
@dataclass
class _BatchState:
    """Collect results for a single interaction-agent turn."""

    # Identifier shared by every execution in this batch.
    batch_id: str
    # When the batch was opened.
    created_at: datetime = field(default_factory=datetime.now)
    # Number of executions still running; the batch dispatches at zero.
    pending: int = 0
    # Results gathered so far, in completion order.
    results: List[ExecutionResult] = field(default_factory=list)
class ExecutionBatchManager:
    """Run execution agents and deliver their combined outcome.

    Executions that start while others are still in flight join the same
    batch; when the batch drains, every result is forwarded to the
    interaction agent as one combined message.
    """

    # Initialize batch manager with timeout and coordination state for execution agents
    def __init__(self, timeout_seconds: int = 90) -> None:
        """
        Args:
            timeout_seconds: Wall-clock budget per execution before it is
                reported as timed out.
        """
        self.timeout_seconds = timeout_seconds
        self._pending: Dict[str, PendingExecution] = {}
        self._batch_lock = asyncio.Lock()
        self._batch_state: Optional[_BatchState] = None
        # Strong references to fire-and-forget dispatch tasks: the event loop
        # holds only weak references to tasks, so without this set a dispatch
        # task could be garbage-collected before it completes.
        self._dispatch_tasks: set = set()

    # Run execution agent with timeout handling and batch coordination for interaction agent
    async def execute_agent(
        self,
        agent_name: str,
        instructions: str,
        request_id: Optional[str] = None,
    ) -> ExecutionResult:
        """Execute an agent asynchronously and buffer the result for batch dispatch.

        Args:
            agent_name: Human-readable agent identifier.
            instructions: Instruction text for the execution runtime.
            request_id: Optional caller-supplied id; a uuid4 is generated otherwise.

        Returns:
            The ExecutionResult for this agent (also folded into the batch).
        """
        if not request_id:
            request_id = str(uuid.uuid4())

        batch_id = await self._register_pending_execution(agent_name, instructions, request_id)

        try:
            logger.info(f"[{agent_name}] Execution started")
            runtime = ExecutionAgentRuntime(agent_name=agent_name)
            result = await asyncio.wait_for(
                runtime.execute(instructions),
                timeout=self.timeout_seconds,
            )
            status = "SUCCESS" if result.success else "FAILED"
            logger.info(f"[{agent_name}] Execution finished: {status}")
        except asyncio.TimeoutError:
            logger.error(f"[{agent_name}] Execution timed out after {self.timeout_seconds}s")
            result = ExecutionResult(
                agent_name=agent_name,
                success=False,
                response=f"Execution timed out after {self.timeout_seconds} seconds",
                error="Timeout",
            )
        except Exception as exc:  # pragma: no cover - defensive
            logger.exception(f"[{agent_name}] Execution failed unexpectedly")
            result = ExecutionResult(
                agent_name=agent_name,
                success=False,
                response=f"Execution failed: {exc}",
                error=str(exc),
            )
        finally:
            # Drop the bookkeeping entry regardless of outcome.
            self._pending.pop(request_id, None)

        await self._complete_execution(batch_id, result, agent_name)
        return result

    # Add execution request to current batch or create new batch if none exists
    async def _register_pending_execution(
        self,
        agent_name: str,
        instructions: str,
        request_id: str,
    ) -> str:
        """Attach a new execution to the active batch, opening one when required."""
        async with self._batch_lock:
            if self._batch_state is None:
                batch_id = str(uuid.uuid4())
                self._batch_state = _BatchState(batch_id=batch_id)
            else:
                batch_id = self._batch_state.batch_id

            self._batch_state.pending += 1
            self._pending[request_id] = PendingExecution(
                request_id=request_id,
                agent_name=agent_name,
                instructions=instructions,
                batch_id=batch_id,
            )

        return batch_id

    # Store execution result and send combined batch to interaction agent when complete
    async def _complete_execution(
        self,
        batch_id: str,
        result: ExecutionResult,
        agent_name: str,
    ) -> None:
        """Record the execution result and dispatch when the batch drains."""
        dispatch_payload: Optional[str] = None

        async with self._batch_lock:
            state = self._batch_state
            if state is None or state.batch_id != batch_id:
                # Batch already dispatched or cleared (e.g. by shutdown).
                logger.warning(f"[{agent_name}] Dropping result for unknown batch")
                return

            state.results.append(result)
            state.pending -= 1

            if state.pending == 0:
                dispatch_payload = self._format_batch_payload(state.results)
                agent_names = [entry.agent_name for entry in state.results]
                logger.info(f"Execution batch completed: {', '.join(agent_names)}")
                self._batch_state = None

        # Dispatch outside the lock so a slow interaction agent cannot block
        # registration of new executions.
        if dispatch_payload:
            await self._dispatch_to_interaction_agent(dispatch_payload)

    # Return list of currently pending execution requests for monitoring purposes
    def get_pending_executions(self) -> List[Dict[str, str]]:
        """Expose pending executions for observability."""
        return [
            {
                "request_id": pending.request_id,
                "agent_name": pending.agent_name,
                "batch_id": pending.batch_id,
                "created_at": pending.created_at.isoformat(),
                "elapsed_seconds": (datetime.now() - pending.created_at).total_seconds(),
            }
            for pending in self._pending.values()
        ]

    # Clean up all pending executions and batch state on shutdown
    async def shutdown(self) -> None:
        """Clear pending bookkeeping (no background work remains)."""
        self._pending.clear()
        async with self._batch_lock:
            self._batch_state = None

    # Format multiple execution results into single message for interaction agent
    def _format_batch_payload(self, results: List[ExecutionResult]) -> str:
        """Render execution results into the interaction-agent format."""
        entries: List[str] = []
        for result in results:
            status = "SUCCESS" if result.success else "FAILED"
            response_text = (result.response or "(no response provided)").strip()
            entries.append(f"[{status}] {result.agent_name}: {response_text}")
        return "\n".join(entries)

    # Forward combined execution results to interaction agent for user response generation
    async def _dispatch_to_interaction_agent(self, payload: str) -> None:
        """Send the aggregated execution summary to the interaction agent."""
        from ..interaction_agent.runtime import InteractionAgentRuntime

        runtime = InteractionAgentRuntime()
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            # No running loop (sync context): run the handoff to completion here.
            asyncio.run(runtime.handle_agent_message(payload))
            return

        # Keep a strong reference so the task survives until it runs; the
        # event loop alone only weakly references scheduled tasks.
        task = loop.create_task(runtime.handle_agent_message(payload))
        self._dispatch_tasks.add(task)
        task.add_done_callback(self._dispatch_tasks.discard)
server/agents/execution_agent/runtime.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Simplified Execution Agent Runtime."""
2
+
3
import inspect
import json
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple

from .agent import ExecutionAgent
from .tools import get_tool_schemas, get_tool_registry
from ...config import get_settings
from ...openrouter_client import request_chat_completion
from ...logging_config import logger
13
+
14
+
15
@dataclass
class ExecutionResult:
    """Result from an execution agent.

    Attributes:
        agent_name: Name of the agent that produced this result.
        success: Whether the run completed without raising.
        response: Final text from the agent (or a failure summary).
        error: Error message when ``success`` is False, otherwise ``None``.
        tools_executed: Names of tools invoked during the run, in call order.
    """

    agent_name: str
    success: bool
    response: str
    error: Optional[str] = None
    # A factory gives each instance its own list; the previous `= None`
    # default contradicted the declared `List[str]` type and forced callers
    # to None-check before iterating.
    tools_executed: List[str] = field(default_factory=list)
23
+
24
+
25
class ExecutionAgentRuntime:
    """Manages the execution of a single agent request.

    Drives a tool-calling loop against the configured LLM: each round asks
    for a completion, executes any requested tools, feeds the results back
    as ``tool`` messages, and stops when the model answers without tools.
    """

    # Upper bound on LLM round-trips before the run is aborted.
    MAX_TOOL_ITERATIONS = 8

    # Initialize execution agent runtime with settings, tools, and agent instance
    def __init__(self, agent_name: str):
        settings = get_settings()
        self.agent = ExecutionAgent(agent_name)
        self.api_key = settings.api_key
        self.model = settings.execution_agent_model
        self.tool_registry = get_tool_registry(agent_name=agent_name)
        self.tool_schemas = get_tool_schemas()

        if not self.api_key:
            raise ValueError("API key not configured. Set API_KEY environment variable.")

    # Main execution loop for running agent with LLM calls and tool execution
    async def execute(self, instructions: str) -> ExecutionResult:
        """Execute the agent with given instructions.

        Returns a success ExecutionResult carrying the model's final text, or
        a failure ExecutionResult if any step raises (the error is also
        recorded on the agent's history).
        """
        try:
            # Build system prompt with history
            system_prompt = self.agent.build_system_prompt_with_history()

            # Start conversation with the instruction
            messages = [{"role": "user", "content": instructions}]
            tools_executed: List[str] = []
            final_response: Optional[str] = None

            for iteration in range(self.MAX_TOOL_ITERATIONS):
                logger.info(
                    f"[{self.agent.name}] Requesting plan (iteration {iteration + 1})"
                )
                response = await self._make_llm_call(system_prompt, messages, with_tools=True)
                assistant_message = response.get("choices", [{}])[0].get("message", {})

                if not assistant_message:
                    raise RuntimeError("LLM response did not include an assistant message")

                raw_tool_calls = assistant_message.get("tool_calls", []) or []
                parsed_tool_calls = self._extract_tool_calls(raw_tool_calls)

                # Echo the assistant turn (with its raw tool calls) into the
                # transcript so the provider sees a consistent history.
                assistant_entry: Dict[str, Any] = {
                    "role": "assistant",
                    "content": assistant_message.get("content", "") or "",
                }
                if raw_tool_calls:
                    assistant_entry["tool_calls"] = raw_tool_calls
                messages.append(assistant_entry)

                if not parsed_tool_calls:
                    # No tool calls: the assistant content is the final answer.
                    final_response = assistant_entry["content"] or "No action required."
                    break

                for tool_call in parsed_tool_calls:
                    tool_name = tool_call.get("name", "")
                    tool_args = tool_call.get("arguments", {})
                    call_id = tool_call.get("id")

                    if not tool_name:
                        # Malformed call: report the failure back to the model
                        # instead of aborting the whole run.
                        logger.warning("Tool call missing name: %s", tool_call)
                        failure = {"error": "Tool call missing name; unable to execute."}
                        tool_message = {
                            "role": "tool",
                            "tool_call_id": call_id or "unknown_tool",
                            "content": self._format_tool_result(
                                tool_name or "<unknown>", False, failure, tool_args
                            ),
                        }
                        messages.append(tool_message)
                        continue

                    tools_executed.append(tool_name)
                    logger.info(f"[{self.agent.name}] Executing tool: {tool_name}")

                    success, result = await self._execute_tool(tool_name, tool_args)

                    if success:
                        logger.info(f"[{self.agent.name}] Tool {tool_name} completed successfully")
                        record_payload = self._safe_json_dump(result)
                    else:
                        error_detail = result.get("error") if isinstance(result, dict) else str(result)
                        logger.warning(f"[{self.agent.name}] Tool {tool_name} failed: {error_detail}")
                        record_payload = error_detail

                    # Persist the call in the agent's own history/audit trail.
                    self.agent.record_tool_execution(
                        tool_name,
                        self._safe_json_dump(tool_args),
                        record_payload
                    )

                    tool_message = {
                        "role": "tool",
                        "tool_call_id": call_id or tool_name,
                        "content": self._format_tool_result(tool_name, success, result, tool_args),
                    }
                    messages.append(tool_message)

            else:
                # for/else: the loop never hit `break` — the model kept
                # requesting tools until the iteration budget ran out.
                raise RuntimeError("Reached tool iteration limit without final response")

            if final_response is None:
                # Defensive guard; the break path above always sets final_response.
                raise RuntimeError("LLM did not return a final response")

            self.agent.record_response(final_response)

            return ExecutionResult(
                agent_name=self.agent.name,
                success=True,
                response=final_response,
                tools_executed=tools_executed
            )

        except Exception as e:
            logger.error(f"[{self.agent.name}] Execution failed: {e}")
            error_msg = str(e)
            failure_text = f"Failed to complete task: {error_msg}"
            self.agent.record_response(f"Error: {error_msg}")

            return ExecutionResult(
                agent_name=self.agent.name,
                success=False,
                response=failure_text,
                error=error_msg
            )

    # Execute API call with system prompt, messages, and optional tool schemas
    async def _make_llm_call(self, system_prompt: str, messages: List[Dict], with_tools: bool) -> Dict:
        """Make an LLM call, attaching the tool schemas when with_tools is True."""
        tools_to_send = self.tool_schemas if with_tools else None
        logger.info(f"[{self.agent.name}] Calling LLM with model: {self.model}, tools: {len(tools_to_send) if tools_to_send else 0}")
        return await request_chat_completion(
            model=self.model,
            messages=messages,
            system=system_prompt,
            api_key=self.api_key,
            tools=tools_to_send
        )

    # Parse and validate tool calls from LLM response into structured format
    def _extract_tool_calls(self, raw_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Extract tool calls from an assistant message.

        JSON-decodes string argument payloads (falling back to {} on invalid
        JSON) and drops any entry without a function name.
        """
        tool_calls: List[Dict[str, Any]] = []

        for tool in raw_tools:
            function = tool.get("function", {})
            name = function.get("name", "")
            args = function.get("arguments", "")

            if isinstance(args, str):
                try:
                    args = json.loads(args) if args else {}
                except json.JSONDecodeError:
                    args = {}

            if name:
                tool_calls.append({
                    "id": tool.get("id"),
                    "name": name,
                    "arguments": args,
                })

        return tool_calls

    # Safely convert objects to JSON with fallback to string representation
    def _safe_json_dump(self, payload: Any) -> str:
        """Serialize payload to JSON, falling back to string representation."""
        try:
            return json.dumps(payload, default=str)
        except TypeError:
            return str(payload)

    # Format tool execution results into JSON structure for LLM consumption
    def _format_tool_result(
        self,
        tool_name: str,
        success: bool,
        result: Any,
        arguments: Dict[str, Any],
    ) -> str:
        """Build a structured string for tool responses.

        Success payloads carry the raw result; failures carry only an
        extracted error detail.
        """
        if success:
            payload: Dict[str, Any] = {
                "tool": tool_name,
                "status": "success",
                "arguments": arguments,
                "result": result,
            }
        else:
            error_detail = result.get("error") if isinstance(result, dict) else str(result)
            payload = {
                "tool": tool_name,
                "status": "error",
                "arguments": arguments,
                "error": error_detail,
            }
        return self._safe_json_dump(payload)

    # Execute tool function from registry with error handling and async support
    async def _execute_tool(self, tool_name: str, arguments: Dict) -> Tuple[bool, Any]:
        """Execute a tool. Returns (success, result).

        Awaits coroutine tools transparently; any exception is captured and
        returned as a failure dict rather than propagated.
        """
        tool_func = self.tool_registry.get(tool_name)
        if not tool_func:
            return False, {"error": f"Unknown tool: {tool_name}"}

        try:
            result = tool_func(**arguments)
            if inspect.isawaitable(result):
                result = await result
            return True, result
        except Exception as e:
            return False, {"error": str(e)}
server/agents/execution_agent/system_prompt.md ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are the assistant of Poke by the Interaction Company of California. You are the "execution engine" of Poke, helping complete tasks for Poke, while Poke talks to the user. Your job is to execute and accomplish a goal, and you do not have direct access to the user.
2
+
3
+ IMPORTANT: Don't ever execute a draft unless you receive explicit confirmation to execute it. If you are instructed to send an email, first JUST create the draft. Then, when the user confirms the draft, we can send it.
4
+
5
+
6
+ Your final output is directed to Poke, which handles user conversations and presents your results to the user. Focus on providing Poke with adequate contextual information; you are not responsible for framing responses in a user-friendly way.
7
+
8
+ If it needs more data from Poke or the user, you should also include it in your final output message. If you ever need to send a message to the user, you should tell Poke to forward that message to the user.
9
+
10
+ Remember that your last output message (summary) will be forwarded to Poke. In that message, provide all relevant information and avoid preamble or postamble (e.g., "Here's what I found:" or "Let me know if this looks good to send"). If you create a draft, you need to send the exact to, subject, and body of the draft to the interaction agent verbatim.
11
+
12
+ This conversation history may have gaps. It may start from the middle of a conversation, or it may be missing messages. The only assumption you can make is that Poke's latest message is the most recent one, and representative of Poke's current requests. Address that message directly. The other messages are just for context.
13
+
14
+ Before you call any tools, reason through why you are calling them by explaining the thought process. If it could possibly be helpful to call more than one tool at once, then do so.
15
+
16
+ If you have context that would help the execution of a tool call (e.g. the user is searching for emails from a person and you know that person's email address), pass that context along.
17
+
18
+ When searching for personal information about the user, it's probably smart to look through their emails.
19
+
20
+
21
+
22
+
23
+ Agent Name: {agent_name}
24
+ Purpose: {agent_purpose}
25
+
26
+ # Instructions
27
+ [TO BE FILLED IN BY USER - Add your specific instructions here]
28
+
29
+ # Available Tools
30
+ You have access to the following Gmail tools:
31
+ - gmail_create_draft: Create an email draft
32
+ - gmail_execute_draft: Send a previously created draft
33
+ - gmail_forward_email: Forward an existing email
34
+ - gmail_reply_to_thread: Reply to an email thread
35
+
36
+ You also manage reminder triggers for this agent:
37
+ - createTrigger: Store a reminder by providing the payload to run later. Supply an ISO 8601 `start_time` and an iCalendar `RRULE` when recurrence is needed.
38
+ - updateTrigger: Change an existing trigger (use `status="paused"` to cancel or `status="active"` to resume).
39
+ - listTriggers: Inspect all triggers assigned to this agent.
40
+
41
+ # Guidelines
42
+ 1. Analyze the instructions carefully before taking action
43
+ 2. Use the appropriate tools to complete the task
44
+ 3. Be thorough and accurate in your execution
45
+ 4. Provide clear, concise responses about what you accomplished
46
+ 5. If you encounter errors, explain what went wrong and what you tried
47
+ 6. When creating or updating triggers, convert natural-language schedules into explicit `RRULE` strings and precise `start_time` timestamps yourself—do not rely on the trigger service to infer intent without them.
48
+ 7. All times will be interpreted using the user's automatically detected timezone.
49
+ 8. After creating or updating a trigger, consider calling `listTriggers` to confirm the schedule when clarity would help future runs.
50
+
51
+ When you receive instructions, think step-by-step about what needs to be done, then execute the necessary tools to complete the task.
server/agents/execution_agent/tasks/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Task registry for execution agents."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Callable, Dict, List
6
+
7
+ from .search_email.schemas import get_schemas as _get_email_search_schemas
8
+ from .search_email.tool import build_registry as _build_email_search_registry
9
+
10
+
11
# Collect the tool schemas exposed by every task module.
def get_task_schemas() -> List[Dict[str, Any]]:
    """Return tool schemas contributed by task modules."""

    return list(_get_email_search_schemas())
16
+
17
+
18
# Build the mapping from task-tool name to its executable callable.
def get_task_registry(agent_name: str) -> Dict[str, Callable[..., Any]]:
    """Return executable task tools keyed by name."""

    return dict(_build_email_search_registry(agent_name))
25
+
26
+
27
+ __all__ = [
28
+ "get_task_registry",
29
+ "get_task_schemas",
30
+ ]
server/agents/execution_agent/tasks/search_email/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Email search task package."""
2
+
3
+ from .schemas import SEARCH_TOOL_NAME, TASK_TOOL_NAME, TaskEmailSearchPayload, get_schemas
4
+ from .tool import GmailSearchEmail, EmailSearchToolResult, build_registry, task_email_search
5
+
6
+ __all__ = [
7
+ "GmailSearchEmail",
8
+ "EmailSearchToolResult",
9
+ "TaskEmailSearchPayload",
10
+ "SEARCH_TOOL_NAME",
11
+ "TASK_TOOL_NAME",
12
+ "build_registry",
13
+ "get_schemas",
14
+ "task_email_search",
15
+ ]
server/agents/execution_agent/tasks/search_email/email_cleaner.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Backward-compatible re-export for shared email cleaning utilities."""
2
+
3
+ from server.services.gmail import EmailTextCleaner
4
+
5
+ __all__ = ["EmailTextCleaner"]
server/agents/execution_agent/tasks/search_email/gmail_internal.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Internal Gmail utilities for the search_email task.
2
+
3
+ This module contains Gmail functions that are internal to the search_email task
4
+ and should not be exposed as public tools to execution agents.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from typing import Any, Dict, List, Optional
10
+
11
+ from server.services.gmail import execute_gmail_tool, get_active_gmail_user_id
12
+
13
# Schema for the internal LLM to call gmail_fetch_emails
# Only query/max_results/include_spam_trash are advertised; the Python
# wrapper below accepts more parameters, which the LLM never sets.
GMAIL_FETCH_EMAILS_SCHEMA = {
    "type": "function",
    "function": {
        "name": "gmail_fetch_emails",
        "description": "Search Gmail and retrieve matching messages",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Gmail search query (same syntax as Gmail UI).",
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of emails to return. Default: 10. Use higher values (20-50) only when absolutely necessary for comprehensive searches like 'all important emails this month'.",
                    "minimum": 1,
                    "maximum": 100,
                },
                "include_spam_trash": {
                    "type": "boolean",
                    "description": "Include spam and trash messages. Default: false.",
                },
            },
            "additionalProperties": False,
        },
    },
}
41
+
42
+
43
def gmail_fetch_emails(
    query: Optional[str] = None,
    label_ids: Optional[List[str]] = None,
    max_results: Optional[int] = None,
    page_token: Optional[str] = None,
    ids_only: Optional[bool] = None,
    include_payload: Optional[bool] = None,
    include_spam_trash: Optional[bool] = None,
    verbose: Optional[bool] = None,
) -> Dict[str, Any]:
    """Fetch Gmail messages with optional filters and verbosity controls.

    This is an internal function for the search_email task and should not
    be exposed as a public tool to execution agents.
    """
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}

    # Forward every filter through the same composio integration the public
    # Gmail tools use; unset (None) values are passed along untouched.
    request_args: Dict[str, Any] = {
        "query": query,
        "label_ids": label_ids,
        "max_results": max_results,
        "page_token": page_token,
        "ids_only": ids_only,
        "include_payload": include_payload,
        "include_spam_trash": include_spam_trash,
        "verbose": verbose,
    }
    return execute_gmail_tool("GMAIL_FETCH_EMAILS", composio_user_id, request_args)
74
+
75
+
76
+ __all__ = [
77
+ "gmail_fetch_emails",
78
+ "GMAIL_FETCH_EMAILS_SCHEMA",
79
+ ]
server/agents/execution_agent/tasks/search_email/schemas.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Schemas for the email search task tools."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from datetime import datetime
6
+ from typing import Any, Dict, List, Literal, Optional
7
+
8
+ from pydantic import BaseModel, ConfigDict, Field
9
+
10
# Public names of the tools this task defines.
TASK_TOOL_NAME = "task_email_search"
SEARCH_TOOL_NAME = "gmail_fetch_emails"
COMPLETE_TOOL_NAME = "return_search_results"

# JSON schema advertised to execution agents for the task entry point.
_SCHEMAS: List[Dict[str, Any]] = [
    {
        "type": "function",
        "function": {
            "name": TASK_TOOL_NAME,
            "description": "Expand a raw Gmail search request into multiple targeted queries and return relevant emails.",
            "parameters": {
                "type": "object",
                "properties": {
                    "search_query": {
                        "type": "string",
                        "description": "Raw search request describing the emails to find.",
                    },
                },
                "required": ["search_query"],
                "additionalProperties": False,
            },
        },
    }
]
34
+
35
+
36
class GmailSearchEmail(BaseModel):
    """Clean email representation with enhanced content processing."""

    # Ignore unknown fields from the Gmail payload; instances are immutable.
    model_config = ConfigDict(extra="ignore", frozen=True)

    # Core identifiers
    id: str  # message_id from Gmail API
    thread_id: Optional[str] = None
    query: str  # The search query that found this email

    # Email metadata
    subject: str
    sender: str
    recipient: str  # to field
    timestamp: datetime
    label_ids: List[str] = Field(default_factory=list)

    # Clean content (primary field for LLM consumption)
    clean_text: str  # Processed, readable email content

    # Attachment information
    has_attachments: bool = False
    attachment_count: int = 0
    attachment_filenames: List[str] = Field(default_factory=list)
60
+
61
+
62
class EmailSearchToolResult(BaseModel):
    """Structured payload for each tool-call response."""

    status: Literal["success", "error"]
    query: Optional[str] = None  # the Gmail query this result answers
    result_count: Optional[int] = None
    next_page_token: Optional[str] = None  # opaque Gmail pagination token
    messages: List[GmailSearchEmail] = Field(default_factory=list)
    error: Optional[str] = None  # populated only when status == "error"
71
+
72
+
73
class TaskEmailSearchPayload(BaseModel):
    """Envelope for the final email selection."""

    # Reject unknown fields outright; instances are immutable once built.
    model_config = ConfigDict(extra="forbid", frozen=True)

    emails: List[GmailSearchEmail]
79
+
80
+
81
# Schema for the terminal tool the internal search LLM calls to hand back
# its final selection of relevant Gmail message ids.
_COMPLETION_SCHEMAS: List[Dict[str, Any]] = [
    {
        "type": "function",
        "function": {
            "name": COMPLETE_TOOL_NAME,
            "description": "Return the final list of relevant Gmail message ids that match the search criteria.",
            "parameters": {
                "type": "object",
                "properties": {
                    "message_ids": {
                        "type": "array",
                        "description": "List of Gmail message ids deemed relevant.",
                        "items": {"type": "string"},
                    },
                },
                "required": ["message_ids"],
                "additionalProperties": False,
            },
        },
    }
]
102
+
103
def get_completion_schema() -> Dict[str, Any]:
    """Return the schema for the `return_search_results` completion tool."""
    return _COMPLETION_SCHEMAS[0]
105
+
106
+
107
def get_schemas() -> List[Dict[str, Any]]:
    """Return the JSON schema for the email search task.

    Returns the shared module-level list; callers must not mutate it.
    """

    return _SCHEMAS
111
+
112
+
113
+ __all__ = [
114
+ "GmailSearchEmail",
115
+ "EmailSearchToolResult",
116
+ "TaskEmailSearchPayload",
117
+ "SEARCH_TOOL_NAME",
118
+ "COMPLETE_TOOL_NAME",
119
+ "TASK_TOOL_NAME",
120
+ "get_completion_schema",
121
+ "get_schemas",
122
+ ]
server/agents/execution_agent/tasks/search_email/system_prompt.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """System prompt for the Gmail search assistant."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from datetime import datetime
6
+
7
+
8
def get_system_prompt() -> str:
    """Generate system prompt with today's date for Gmail search assistant.

    The date is interpolated so relative phrases ("today", "this week")
    resolve against the current day.
    """
    # NOTE(review): datetime.now() is naive local server time — confirm this
    # matches the user-timezone expectation stated elsewhere in the prompt.
    today = datetime.now().strftime("%Y/%m/%d")

    # NOTE(review): the internal indentation of this markdown literal was
    # reconstructed from a whitespace-mangled source — verify sub-list
    # indentation against the original file.
    return (
        "You are an expert Gmail search assistant helping users find emails efficiently.\n"
        f"\n"
        f"## Current Context:\n"
        f"- Today's date: {today}\n"
        f"- Use this date as reference for relative time queries (e.g., 'recent', 'today', 'this week')\n"
        "\n"
        "## Available Tools:\n"
        "- `gmail_fetch_emails`: Search Gmail using advanced search parameters\n"
        "  - `query`: Gmail search query using standard Gmail search operators\n"
        "  - `max_results`: Maximum emails to return (default: 10, range: 1-100)\n"
        "  - `include_spam_trash`: Include spam/trash messages (default: false)\n"
        "- `return_search_results`: Return the final list of relevant message IDs\n"
        "\n"
        "## Gmail Search Strategy:\n"
        "1. **Use Gmail's powerful search operators** to create precise queries:\n"
        "   - `from:[email protected]` - emails from specific sender\n"
        "   - `to:[email protected]` - emails to specific recipient\n"
        "   - `subject:keyword` - emails with specific subject content\n"
        "   - `has:attachment` - emails with attachments\n"
        "   - `after:YYYY/MM/DD` and `before:YYYY/MM/DD` - date ranges\n"
        "   - `is:unread`, `is:read`, `is:important` - status filters\n"
        "   - `in:inbox`, `in:sent`, `in:trash` - location filters\n"
        "   - `larger:10M`, `smaller:1M` - size filters\n"
        "   - `\"exact phrase\"` - exact phrase matching\n"
        "   - `OR`, `-` (NOT), `()` for complex boolean logic\n"
        "\n"
        "2. **Run multiple searches in parallel** when the user's request suggests different approaches:\n"
        "   - Search by sender AND by keywords simultaneously\n"
        "   - Try relevant date ranges in parallel\n"
        "   - Search multiple related terms or variations\n"
        "   - Combine broad and specific queries\n"
        "\n"
        "3. **Use max_results strategically** to balance comprehensiveness with context efficiency:\n"
        "   - **Default: 10 results** - suitable for most targeted searches\n"
        "   - **Use 20-50 results** only when absolutely necessary for comprehensive queries like:\n"
        "     * \"All important emails from the past month\"\n"
        "     * \"All meeting invites from this quarter\"\n"
        "     * \"All emails with attachments from a specific project\"\n"
        "   - **Avoid over-burdening context** - prefer multiple targeted 10-result searches over one large search\n"
        "   - **Judge necessity carefully** - only increase limit when the query explicitly requires comprehensive results\n"
        "\n"
        "4. **Think strategically** about what search parameters would be most relevant:\n"
        f"   - For \"recent emails from John\": `from:john after:{today}`\n"
        "   - For \"meeting invites\": `subject:meeting OR subject:invite has:attachment`\n"
        "   - For \"large files\": `has:attachment larger:5M`\n"
        "   - For \"unread important emails\": `is:unread is:important`\n"
        f"   - For \"today's emails\": `after:{today}`\n"
        f"   - For \"this week's emails\": Use date ranges based on today ({today})\n"
        "\n"
        "## Email Content Processing:\n"
        "- Each email includes `clean_text` - processed, readable content from HTML/plain text\n"
        "- Clean text has tracking pixels removed, URLs truncated, and formatting optimized\n"
        "- Attachment information is available: `has_attachments`, `attachment_count`, `attachment_filenames`\n"
        "- Email timestamps are automatically converted to the user's preferred timezone\n"
        "- Use clean text content to understand email context and relevance\n"
        "\n"
        "## Your Process:\n"
        "1. **Analyze** the user's request to identify key search criteria\n"
        "2. **Search strategically** using multiple targeted Gmail queries with appropriate operators\n"
        "3. **Review content** - examine the `clean_text` field to understand email relevance\n"
        "4. **Consider attachments** - factor in attachment information when relevant to the query\n"
        "5. **Refine searches** - run additional queries if needed based on content analysis\n"
        "6. **Select results** - call `return_search_results` with message IDs that best match intent\n"
        "\n"
        "Be thorough and strategic - use Gmail's search power AND content analysis to find exactly what the user needs!"
    )
79
+
80
+
81
+ __all__ = [
82
+ "get_system_prompt",
83
+ ]
server/agents/execution_agent/tasks/search_email/tool.py ADDED
@@ -0,0 +1,450 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Email search task implementation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
7
+
8
+ from server.config import get_settings
9
+ from server.logging_config import logger
10
+ from server.openrouter_client import request_chat_completion
11
+ from server.services.execution import get_execution_agent_logs
12
+ from server.services.gmail import (
13
+ EmailTextCleaner,
14
+ ProcessedEmail,
15
+ execute_gmail_tool,
16
+ get_active_gmail_user_id,
17
+ parse_gmail_fetch_response,
18
+ )
19
+ from .gmail_internal import GMAIL_FETCH_EMAILS_SCHEMA
20
+ from .schemas import (
21
+ GmailSearchEmail,
22
+ EmailSearchToolResult,
23
+ TaskEmailSearchPayload,
24
+ COMPLETE_TOOL_NAME,
25
+ SEARCH_TOOL_NAME,
26
+ TASK_TOOL_NAME,
27
+ get_completion_schema,
28
+ )
29
+ from .system_prompt import get_system_prompt
30
+
31
# Constants
MAX_LLM_ITERATIONS = 8  # hard cap on orchestration round-trips
ERROR_GMAIL_NOT_CONNECTED = "Gmail not connected. Please connect Gmail in settings first."
ERROR_OPENROUTER_NOT_CONFIGURED = "API key not configured. Set API_KEY."
ERROR_EMPTY_QUERY = "search_query must not be empty"
ERROR_QUERY_REQUIRED = "query parameter is required"
ERROR_MESSAGE_IDS_REQUIRED = "message_ids parameter is required"
ERROR_MESSAGE_IDS_MUST_BE_LIST = "message_ids must be provided as a list"
ERROR_TOOL_ARGUMENTS_INVALID = "Tool arguments must be an object"
ERROR_ITERATION_LIMIT = "Email search orchestrator exceeded iteration limit"



# Module-level singletons shared by every search run.
_COMPLETION_TOOL_SCHEMA = get_completion_schema()
_LOG_STORE = get_execution_agent_logs()
_EMAIL_CLEANER = EmailTextCleaner(max_url_length=40)
47
+
48
+
49
# Create standardized error response for tool calls
def _create_error_response(call_id: str, query: Optional[str], error: str) -> Tuple[str, str]:
    """Create standardized error response for tool calls.

    Returns a ``(tool_call_id, serialized JSON body)`` pair.
    """
    result = EmailSearchToolResult(status="error", query=query, error=error)
    return (call_id, _safe_json_dumps(result.model_dump(exclude_none=True)))
54
+
55
+
56
# Create standardized success response for tool calls
def _create_success_response(call_id: str, data: Dict[str, Any]) -> Tuple[str, str]:
    """Create standardized success response for tool calls.

    Returns a ``(tool_call_id, serialized JSON body)`` pair.
    """
    return (call_id, _safe_json_dumps(data))
60
+
61
+
62
def _validate_search_query(search_query: str) -> Optional[str]:
    """Validate search query and return error message if invalid."""
    stripped = (search_query or "").strip()
    return ERROR_EMPTY_QUERY if not stripped else None
67
+
68
+
69
def _validate_gmail_connection() -> Optional[str]:
    """Return the active composio Gmail user id, or None when not connected.

    Despite the name, no validation happens here beyond the lookup itself.
    """
    return get_active_gmail_user_id()
72
+
73
+
74
def _validate_openrouter_config() -> Tuple[Optional[str], Optional[str]]:
    """Validate API configuration and return (api_key, model) or (None, error)."""
    settings = get_settings()
    key = settings.api_key
    if not key:
        return None, ERROR_OPENROUTER_NOT_CONFIGURED
    # Second slot doubles as the model name on success, error text on failure.
    return key, settings.execution_agent_search_model
81
+
82
+
83
# Expose the single task tool this package contributes.
def build_registry(agent_name: str) -> Dict[str, Callable[..., Any]]:  # noqa: ARG001
    """Return task tool callables."""

    return {TASK_TOOL_NAME: task_email_search}
90
+
91
+
92
# Run an agentic Gmail search for the provided query
async def task_email_search(search_query: str) -> Any:
    """Run an agentic Gmail search for the provided query.

    Returns the selected emails on success, or a ``{"error": ...}`` dict when
    validation fails or the orchestration raises.
    """
    logger.info(f"[EMAIL_SEARCH] Starting search for: '{search_query}'")

    # Validate inputs
    cleaned_query = (search_query or "").strip()
    if error := _validate_search_query(cleaned_query):
        logger.error(f"[EMAIL_SEARCH] Invalid query: {error}")
        return {"error": error}

    composio_user_id = _validate_gmail_connection()
    if not composio_user_id:
        logger.error(f"[EMAIL_SEARCH] Gmail not connected")
        return {"error": ERROR_GMAIL_NOT_CONNECTED}

    # model_or_error is the model name when api_key is set, otherwise the
    # error text to surface to the caller.
    api_key, model_or_error = _validate_openrouter_config()
    if not api_key:
        logger.error(f"[EMAIL_SEARCH] API not configured: {model_or_error}")
        return {"error": model_or_error}

    try:
        result = await _run_email_search(
            search_query=cleaned_query,
            composio_user_id=composio_user_id,
            model=model_or_error,
            api_key=api_key,
        )
        logger.info(f"[EMAIL_SEARCH] Found {len(result) if isinstance(result, list) else 0} emails")
        return result
    except Exception as exc:  # pragma: no cover - defensive
        logger.exception(f"[EMAIL_SEARCH] Search failed: {exc}")
        return {"error": f"Email search failed: {exc}"}
125
+
126
+
127
# Execute the main email search orchestration loop
async def _run_email_search(
    *,
    search_query: str,
    composio_user_id: str,
    model: str,
    api_key: str,
) -> List[Dict[str, Any]]:
    """Drive the LLM search loop until it selects emails or stops.

    The LLM is offered the Gmail fetch tool and a completion tool on every
    turn.  Each search merges results into ``emails``; the loop ends when the
    completion tool reports selected message ids, when the LLM stops calling
    tools, or when ``MAX_LLM_ITERATIONS`` is exhausted (RuntimeError).

    Returns:
        The serialized selected emails (possibly empty).
    """
    messages: List[Dict[str, Any]] = [
        {"role": "user", "content": _render_user_message(search_query)}
    ]
    queries: List[str] = []
    emails: Dict[str, GmailSearchEmail] = {}
    selected_ids: Optional[List[str]] = None

    for iteration in range(MAX_LLM_ITERATIONS):
        logger.debug(
            "[task_email_search] LLM iteration",
            extra={"iteration": iteration + 1, "tool": TASK_TOOL_NAME},
        )

        # Get LLM response
        response = await request_chat_completion(
            model=model,
            messages=messages,
            system=get_system_prompt(),
            api_key=api_key,
            tools=[GMAIL_FETCH_EMAILS_SCHEMA, _COMPLETION_TOOL_SCHEMA],
        )

        # Process assistant response
        assistant = _extract_assistant_message(response)
        tool_calls = assistant.get("tool_calls") or []

        # Add assistant message to conversation
        assistant_entry = {
            "role": "assistant",
            "content": assistant.get("content", "") or "",
        }
        if tool_calls:
            assistant_entry["tool_calls"] = tool_calls
        messages.append(assistant_entry)

        # Handle case where LLM doesn't make tool calls: treat as "nothing selected".
        if not tool_calls:
            # Plain string: was an f-string with no placeholders (ruff F541).
            logger.info("[EMAIL_SEARCH] LLM completed search - no more queries needed")
            selected_ids = []
            break

        # Execute tool calls and process responses
        tool_responses, completed_ids = await _execute_tool_calls(
            tool_calls=tool_calls,
            queries=queries,
            emails=emails,
            composio_user_id=composio_user_id,
        )

        # Add tool responses to conversation
        for call_id, content in tool_responses:
            messages.append({
                "role": "tool",
                "tool_call_id": call_id,
                "content": content,
            })

        # Check if search is complete
        if completed_ids is not None:
            logger.info(f"[EMAIL_SEARCH] Search completed - selected {len(completed_ids)} emails")
            selected_ids = completed_ids
            break
    else:
        # Iteration budget exhausted without a completion signal.
        logger.error(f"[EMAIL_SEARCH] {ERROR_ITERATION_LIMIT}")
        raise RuntimeError(ERROR_ITERATION_LIMIT)

    final_result = _build_response(queries, emails, selected_ids or [])
    unique_queries = list(dict.fromkeys(queries))
    logger.info(f"[EMAIL_SEARCH] Completed - {len(unique_queries)} queries executed, {len(final_result)} emails selected")
    return final_result
206
+
207
+
208
+
209
+
210
+ # Create user message for the LLM with search context
211
+ def _render_user_message(search_query: str) -> str:
212
+ """Create user message for the LLM with search context."""
213
+ return f"Please help me find emails: {search_query}"
214
+
215
+
216
# Execute tool calls from LLM and process search/completion responses
async def _execute_tool_calls(
    *,
    tool_calls: List[Dict[str, Any]],
    queries: List[str],
    emails: Dict[str, GmailSearchEmail],
    composio_user_id: str,
) -> Tuple[List[Tuple[str, str]], Optional[List[str]]]:
    """Dispatch each LLM tool call and collect the tool responses.

    Args:
        tool_calls: Raw tool-call entries from the assistant message.
        queries: Mutable list of executed search queries (appended by searches).
        emails: Mutable accumulator of emails keyed by message id.
        composio_user_id: Connected Gmail account id used for searches.

    Returns:
        ``(responses, completion_ids)`` where ``responses`` is a list of
        ``(tool_call_id, serialized_content)`` pairs and ``completion_ids``
        is the selected message-id list once the completion tool fires, else
        ``None``.  Processing stops at the first completion call; any later
        calls in the same batch are ignored.
    """
    responses: List[Tuple[str, str]] = []
    completion_ids: Optional[List[str]] = None

    for call in tool_calls:
        # Fall back to the search tool name when the provider omits a call id.
        call_id = call.get("id") or SEARCH_TOOL_NAME
        function = call.get("function") or {}
        name = function.get("name") or ""
        raw_arguments = function.get("arguments", {})
        arguments, parse_error = _parse_arguments(raw_arguments)

        if parse_error:
            # Handle argument parsing errors
            query = arguments.get("query") if arguments else None
            logger.warning(f"[EMAIL_SEARCH] Tool argument parsing failed: {parse_error}")
            responses.append(_create_error_response(call_id, query, parse_error))

        elif name == COMPLETE_TOOL_NAME:
            # Handle completion tool - signals end of search
            completion_ids_candidate, response_data = _handle_completion_tool(arguments)
            responses.append(_create_success_response(call_id, response_data))
            if completion_ids_candidate is not None:
                logger.info(f"[EMAIL_SEARCH] LLM selected {len(completion_ids_candidate)} emails")
                completion_ids = completion_ids_candidate
                break

        elif name == SEARCH_TOOL_NAME:
            # Handle Gmail search tool
            search_query = arguments.get("query", "<unknown>")
            logger.info(f"[SEARCH_QUERY] LLM generated query: '{search_query}'")

            result_model = await _perform_search(
                arguments=arguments,
                queries=queries,
                emails=emails,
                composio_user_id=composio_user_id,
            )
            response_data = result_model.model_dump(exclude_none=True)

            if result_model.status == "success":
                count = result_model.result_count or 0
                logger.info(f"[SEARCH_RESULT] Query '{search_query}' → {count} emails found")
            else:
                logger.warning(f"[SEARCH_RESULT] Query '{search_query}' → FAILED: {result_model.error}")

            responses.append(_create_success_response(call_id, response_data))

        else:
            # Handle unsupported tools
            query = arguments.get("query")
            error = f"Unsupported tool: {name}"
            logger.warning(f"[EMAIL_SEARCH] Unsupported tool: {name}")
            responses.append(_create_error_response(call_id, query, error))

    return responses, completion_ids
278
+
279
+
280
# Perform Gmail search using Composio and process results
async def _perform_search(
    *,
    arguments: Dict[str, Any],
    queries: List[str],
    emails: Dict[str, GmailSearchEmail],
    composio_user_id: str,
) -> EmailSearchToolResult:
    """Run one Gmail query via Composio and merge results into *emails*.

    Args:
        arguments: Parsed tool-call arguments (``query``, optional
            ``max_results`` and ``include_spam_trash``).
        queries: Running list of executed queries; appended on a successful call.
        emails: Accumulator keyed by message id; first occurrence of an id wins.
        composio_user_id: Connected Gmail account id.

    Returns:
        ``EmailSearchToolResult`` with parsed messages on success, or an
        error status when the query is empty or the Gmail API call fails.
    """
    query = (arguments.get("query") or "").strip()
    if not query:
        # Plain string: was an f-string with no placeholders (ruff F541).
        logger.warning("[EMAIL_SEARCH] Search called with empty query")
        return EmailSearchToolResult(
            status="error",
            error=ERROR_QUERY_REQUIRED,
        )

    # Use LLM-provided max_results or default to 10
    max_results = arguments.get("max_results", 10)

    composio_arguments = {
        "query": query,
        "max_results": max_results,  # Use LLM-provided value or default 10
        "include_payload": True,  # REQUIRED: Need full email content for text cleaning
        "verbose": True,  # REQUIRED: Need parsed content including messageText
        "include_spam_trash": arguments.get("include_spam_trash", False),  # Default: False
        "format": "full",  # Request full email format
        "metadata_headers": ["From", "To", "Subject", "Date"],  # Ensure we get key headers
    }

    _LOG_STORE.record_action(
        TASK_TOOL_NAME,
        description=f"{TASK_TOOL_NAME} search | query={query} | max_results={max_results}",
    )

    try:
        raw_result = execute_gmail_tool(
            "GMAIL_FETCH_EMAILS",
            composio_user_id,
            arguments=composio_arguments,
        )
    except Exception as exc:
        logger.error(f"[EMAIL_SEARCH] Gmail API failed for '{query}': {exc}")
        return EmailSearchToolResult(
            status="error",
            query=query,
            error=str(exc),
        )

    processed_emails, next_page_token = parse_gmail_fetch_response(
        raw_result,
        query=query,
        cleaner=_EMAIL_CLEANER,
    )
    parsed_emails = [_processed_to_schema(email) for email in processed_emails]

    queries.append(query)
    # Keep the first version of each email seen across queries.
    for email in parsed_emails:
        if email.id not in emails:
            emails[email.id] = email

    return EmailSearchToolResult(
        status="success",
        query=query,
        result_count=len(parsed_emails),
        next_page_token=next_page_token,
        messages=parsed_emails,
    )
347
+
348
+
349
# Build final response with selected emails and logging
def _build_response(
    queries: List[str],
    emails: Dict[str, GmailSearchEmail],
    selected_ids: Sequence[str],
) -> List[Dict[str, Any]]:
    """Assemble the final payload from the emails the LLM selected.

    Args:
        queries: All executed queries (may contain duplicates).
        emails: Accumulated emails keyed by message id.
        selected_ids: Message ids the LLM chose; blanks and duplicates are dropped.

    Returns:
        Serialized email dicts, in first-selection order.
    """
    # Deduplicate queries while preserving order
    unique_queries = list(dict.fromkeys(queries))

    # Deduplicate and filter valid email IDs.
    # Loop variables renamed from `id` to avoid shadowing the builtin.
    valid_ids = [email_id.strip() for email_id in selected_ids if email_id and email_id.strip()]
    unique_ids = list(dict.fromkeys(valid_ids))
    selected_emails = [emails[email_id] for email_id in unique_ids if email_id in emails]

    # Log any selected IDs that never appeared in the search results
    missing_ids = [email_id for email_id in unique_ids if email_id not in emails]
    if missing_ids:
        logger.warning(f"[EMAIL_SEARCH] {len(missing_ids)} selected email IDs not found")

    payload = TaskEmailSearchPayload(emails=selected_emails)

    _LOG_STORE.record_action(
        TASK_TOOL_NAME,
        description=(
            f"{TASK_TOOL_NAME} completed | queries={len(unique_queries)} "
            f"| emails={len(selected_emails)}"
        ),
    )

    return [email.model_dump(exclude_none=True) for email in payload.emails]
379
+
380
+
381
+ def _extract_assistant_message(response: Dict[str, Any]) -> Dict[str, Any]:
382
+ """Extract assistant message from API response."""
383
+ return response.get("choices", [{}])[0].get("message", {})
384
+
385
+
386
+ def _parse_arguments(raw_arguments: Any) -> Tuple[Dict[str, Any], Optional[str]]:
387
+ """Parse tool arguments with proper error handling."""
388
+ if isinstance(raw_arguments, dict):
389
+ return raw_arguments, None
390
+ if isinstance(raw_arguments, str):
391
+ if not raw_arguments.strip():
392
+ return {}, None
393
+ try:
394
+ return json.loads(raw_arguments), None
395
+ except json.JSONDecodeError as exc:
396
+ return {}, f"Failed to parse tool arguments: {exc}"
397
+ return {}, ERROR_TOOL_ARGUMENTS_INVALID
398
+
399
+
400
+
401
+ def _handle_completion_tool(arguments: Dict[str, Any]) -> Tuple[Optional[List[str]], Dict[str, Any]]:
402
+ """Handle completion tool call, parsing message IDs and returning response."""
403
+ raw_ids = arguments.get("message_ids")
404
+ if raw_ids is None:
405
+ return None, {"status": "error", "error": ERROR_MESSAGE_IDS_REQUIRED}
406
+ if not isinstance(raw_ids, list):
407
+ return None, {"status": "error", "error": ERROR_MESSAGE_IDS_MUST_BE_LIST}
408
+
409
+ # Filter out empty/invalid IDs efficiently
410
+ message_ids = [str(value).strip() for value in raw_ids if str(value).strip()]
411
+
412
+ return message_ids, {"status": "success", "message_ids": message_ids}
413
+
414
+
415
+ def _safe_json_dumps(payload: Any) -> str:
416
+ """Safely serialize payload to JSON string."""
417
+ try:
418
+ return json.dumps(payload, ensure_ascii=False)
419
+ except (TypeError, ValueError):
420
+ return json.dumps({"repr": repr(payload)})
421
+
422
+
423
+
424
+
425
+
426
def _processed_to_schema(email: ProcessedEmail) -> GmailSearchEmail:
    """Map a shared ProcessedEmail record onto the GmailSearchEmail schema."""
    fields: Dict[str, Any] = {
        "id": email.id,
        "thread_id": email.thread_id,
        "query": email.query,
        "subject": email.subject,
        "sender": email.sender,
        "recipient": email.recipient,
        "timestamp": email.timestamp,
        # Copy sequence fields so the schema owns independent lists.
        "label_ids": list(email.label_ids),
        "clean_text": email.clean_text,
        "has_attachments": email.has_attachments,
        "attachment_count": email.attachment_count,
        "attachment_filenames": list(email.attachment_filenames),
    }
    return GmailSearchEmail(**fields)
443
+
444
+
445
# Public surface of the email-search task module.
__all__ = [
    "GmailSearchEmail",
    "EmailSearchToolResult",
    "build_registry",
    "task_email_search",
]
server/agents/execution_agent/tools/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """Execution agent tool package."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from .registry import get_tool_registry, get_tool_schemas
6
+
7
+ __all__ = [
8
+ "get_tool_registry",
9
+ "get_tool_schemas",
10
+ ]
server/agents/execution_agent/tools/gmail.py ADDED
@@ -0,0 +1,548 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gmail tool schemas and actions for the execution agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any, Callable, Dict, List, Optional
7
+
8
+ from server.services.execution import get_execution_agent_logs
9
+ from server.services.gmail import execute_gmail_tool, get_active_gmail_user_id
10
+
11
+ _GMAIL_AGENT_NAME = "gmail-execution-agent"
12
+
13
# OpenAI-style function schemas for every Gmail tool exposed by this module.
# Each entry's "name" matches a callable registered in build_registry below.
_SCHEMAS: List[Dict[str, Any]] = [
    # Create a draft (optionally HTML, with cc/bcc and a single attachment).
    {
        "type": "function",
        "function": {
            "name": "gmail_create_draft",
            "description": "Create a Gmail draft via Composio, supporting html/plain bodies, cc/bcc, and attachments.",
            "parameters": {
                "type": "object",
                "properties": {
                    "recipient_email": {
                        "type": "string",
                        "description": "Primary recipient email for the draft.",
                    },
                    "subject": {"type": "string", "description": "Email subject."},
                    "body": {
                        "type": "string",
                        "description": "Email body. Use HTML markup when is_html is true.",
                    },
                    "cc": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Optional list of CC recipient emails.",
                    },
                    "bcc": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Optional list of BCC recipient emails.",
                    },
                    "extra_recipients": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Additional recipients if the draft should include more addresses.",
                    },
                    "is_html": {
                        "type": "boolean",
                        "description": "Set true when the body contains HTML content.",
                    },
                    "thread_id": {
                        "type": "string",
                        "description": "Existing Gmail thread id if this draft belongs to a thread.",
                    },
                    "attachment": {
                        "type": "object",
                        "description": "Single attachment metadata (requires Composio-uploaded asset).",
                        "properties": {
                            "s3key": {"type": "string", "description": "S3 key of uploaded file."},
                            "name": {"type": "string", "description": "Attachment filename."},
                            "mimetype": {"type": "string", "description": "Attachment MIME type."},
                        },
                        "required": ["s3key", "name", "mimetype"],
                    },
                },
                "required": ["recipient_email", "subject", "body"],
                "additionalProperties": False,
            },
        },
    },
    # Send an existing draft.
    {
        "type": "function",
        "function": {
            "name": "gmail_execute_draft",
            "description": "Send a previously created Gmail draft using Composio.",
            "parameters": {
                "type": "object",
                "properties": {
                    "draft_id": {
                        "type": "string",
                        "description": "Identifier of the Gmail draft to send.",
                    },
                },
                "required": ["draft_id"],
                "additionalProperties": False,
            },
        },
    },
    # Forward a message to another recipient.
    {
        "type": "function",
        "function": {
            "name": "gmail_forward_email",
            "description": "Forward an existing Gmail message with optional additional context.",
            "parameters": {
                "type": "object",
                "properties": {
                    "message_id": {
                        "type": "string",
                        "description": "Gmail message id to forward.",
                    },
                    "recipient_email": {
                        "type": "string",
                        "description": "Email address to receive the forwarded message.",
                    },
                    "additional_text": {
                        "type": "string",
                        "description": "Optional text to prepend when forwarding.",
                    },
                },
                "required": ["message_id", "recipient_email"],
                "additionalProperties": False,
            },
        },
    },
    # Reply within an existing thread.
    {
        "type": "function",
        "function": {
            "name": "gmail_reply_to_thread",
            "description": "Send a reply within an existing Gmail thread via Composio.",
            "parameters": {
                "type": "object",
                "properties": {
                    "thread_id": {
                        "type": "string",
                        "description": "Gmail thread id to reply to.",
                    },
                    "recipient_email": {
                        "type": "string",
                        "description": "Primary recipient for the reply (usually the original sender).",
                    },
                    "message_body": {
                        "type": "string",
                        "description": "Reply body. Use HTML markup when is_html is true.",
                    },
                    "cc": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Optional list of CC recipient emails.",
                    },
                    "bcc": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Optional list of BCC recipient emails.",
                    },
                    "extra_recipients": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Additional recipients if needed.",
                    },
                    "is_html": {
                        "type": "boolean",
                        "description": "Set true when the body contains HTML content.",
                    },
                    "attachment": {
                        "type": "object",
                        "description": "Single attachment metadata (requires Composio-uploaded asset).",
                        "properties": {
                            "s3key": {"type": "string", "description": "S3 key of uploaded file."},
                            "name": {"type": "string", "description": "Attachment filename."},
                            "mimetype": {"type": "string", "description": "Attachment MIME type."},
                        },
                        "required": ["s3key", "name", "mimetype"],
                    },
                },
                "required": ["thread_id", "recipient_email", "message_body"],
                "additionalProperties": False,
            },
        },
    },
    # Delete a draft.
    {
        "type": "function",
        "function": {
            "name": "gmail_delete_draft",
            "description": "Delete a specific Gmail draft using the Composio Gmail integration.",
            "parameters": {
                "type": "object",
                "properties": {
                    "draft_id": {
                        "type": "string",
                        "description": "Identifier of the Gmail draft to delete.",
                    },
                },
                "required": ["draft_id"],
                "additionalProperties": False,
            },
        },
    },
    # List Google contacts (connections).
    {
        "type": "function",
        "function": {
            "name": "gmail_get_contacts",
            "description": "Retrieve Google contacts (connections) available to the authenticated Gmail account.",
            "parameters": {
                "type": "object",
                "properties": {
                    "resource_name": {
                        "type": "string",
                        "description": "Resource name to read contacts from, defaults to people/me.",
                    },
                    "person_fields": {
                        "type": "string",
                        "description": "Comma-separated People API fields to include (e.g. emailAddresses,names).",
                    },
                    "include_other_contacts": {
                        "type": "boolean",
                        "description": "Include other contacts (directory suggestions) when true.",
                    },
                    "page_token": {
                        "type": "string",
                        "description": "Pagination token for retrieving the next page of contacts.",
                    },
                },
                "additionalProperties": False,
            },
        },
    },
    # Fetch detailed People API records.
    {
        "type": "function",
        "function": {
            "name": "gmail_get_people",
            "description": "Retrieve detailed Google People records or other contacts via Composio.",
            "parameters": {
                "type": "object",
                "properties": {
                    "resource_name": {
                        "type": "string",
                        "description": "Resource name to fetch (defaults to people/me).",
                    },
                    "person_fields": {
                        "type": "string",
                        "description": "Comma-separated People API fields to include in the response.",
                    },
                    "page_size": {
                        "type": "integer",
                        "description": "Maximum number of people records to return per page.",
                    },
                    "page_token": {
                        "type": "string",
                        "description": "Token to continue fetching the next set of results.",
                    },
                    "sync_token": {
                        "type": "string",
                        "description": "Sync token for incremental sync requests.",
                    },
                    "other_contacts": {
                        "type": "boolean",
                        "description": "Set true to list other contacts instead of connections.",
                    },
                },
                "additionalProperties": False,
            },
        },
    },
    # List drafts for the connected account.
    {
        "type": "function",
        "function": {
            "name": "gmail_list_drafts",
            "description": "List Gmail drafts for the connected account using Composio.",
            "parameters": {
                "type": "object",
                "properties": {
                    "max_results": {
                        "type": "integer",
                        "description": "Maximum number of drafts to return.",
                    },
                    "page_token": {
                        "type": "string",
                        "description": "Pagination token from a previous drafts list call.",
                    },
                    "verbose": {
                        "type": "boolean",
                        "description": "Include full draft details such as subject and body when true.",
                    },
                },
                "additionalProperties": False,
            },
        },
    },
    # Search people / contacts.
    {
        "type": "function",
        "function": {
            "name": "gmail_search_people",
            "description": "Search Google contacts and other people records associated with the Gmail account.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Search query to match against names, emails, phone numbers, etc.",
                    },
                    "person_fields": {
                        "type": "string",
                        "description": "Comma-separated fields from the People API to include in results.",
                    },
                    "page_size": {
                        "type": "integer",
                        "description": "Maximum number of people records to return.",
                    },
                    "other_contacts": {
                        "type": "boolean",
                        "description": "Include other contacts results when true.",
                    },
                    "page_token": {
                        "type": "string",
                        "description": "Pagination token to continue a previous search.",
                    },
                },
                "required": ["query"],
                "additionalProperties": False,
            },
        },
    },
]
313
+
314
# Journal used by _execute to record every Gmail tool invocation outcome.
_LOG_STORE = get_execution_agent_logs()
315
+
316
+
317
# Return Gmail tool schemas
def get_schemas() -> List[Dict[str, Any]]:
    """Return Gmail tool schemas.

    Note: this returns the shared module-level list (not a copy), so
    callers must not mutate the result.
    """

    return _SCHEMAS
322
+
323
+
324
def _execute(tool_name: str, composio_user_id: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Run a Composio Gmail action and journal the outcome.

    ``None``-valued arguments are stripped before the call.  Both success
    and failure are recorded; failures re-raise the original exception.
    """
    payload = {key: value for key, value in arguments.items() if value is not None}
    payload_str = json.dumps(payload, ensure_ascii=False, sort_keys=True) if payload else "{}"
    try:
        result = execute_gmail_tool(tool_name, composio_user_id, arguments=payload)
    except Exception as exc:
        # Record the failure before propagating it to the caller.
        _LOG_STORE.record_action(
            _GMAIL_AGENT_NAME,
            description=f"{tool_name} failed | args={payload_str} | error={exc}",
        )
        raise
    _LOG_STORE.record_action(
        _GMAIL_AGENT_NAME,
        description=f"{tool_name} succeeded | args={payload_str}",
    )
    return result
344
+
345
+
346
def gmail_create_draft(
    recipient_email: str,
    subject: str,
    body: str,
    cc: Optional[List[str]] = None,
    bcc: Optional[List[str]] = None,
    extra_recipients: Optional[List[str]] = None,
    is_html: Optional[bool] = None,
    thread_id: Optional[str] = None,
    attachment: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Create a Gmail draft via Composio (HTML bodies, cc/bcc, attachment, threading)."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments: Dict[str, Any] = {
        "recipient_email": recipient_email,
        "subject": subject,
        "body": body,
        "cc": cc,
        "bcc": bcc,
        "extra_recipients": extra_recipients,
        "is_html": is_html,
        "thread_id": thread_id,
        "attachment": attachment,
    }
    return _execute("GMAIL_CREATE_EMAIL_DRAFT", composio_user_id, arguments)
373
+
374
+
375
def gmail_execute_draft(
    draft_id: str,
) -> Dict[str, Any]:
    """Send a previously created Gmail draft using Composio."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    return _execute("GMAIL_SEND_DRAFT", composio_user_id, {"draft_id": draft_id})
384
+
385
+
386
def gmail_forward_email(
    message_id: str,
    recipient_email: str,
    additional_text: Optional[str] = None,
) -> Dict[str, Any]:
    """Forward an existing Gmail message, optionally prepending extra text."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments = {
        "message_id": message_id,
        "recipient_email": recipient_email,
        "additional_text": additional_text,
    }
    return _execute("GMAIL_FORWARD_MESSAGE", composio_user_id, arguments)
401
+
402
+
403
def gmail_reply_to_thread(
    thread_id: str,
    recipient_email: str,
    message_body: str,
    cc: Optional[List[str]] = None,
    bcc: Optional[List[str]] = None,
    extra_recipients: Optional[List[str]] = None,
    is_html: Optional[bool] = None,
    attachment: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Send a reply within an existing Gmail thread via Composio."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments = {
        "thread_id": thread_id,
        "recipient_email": recipient_email,
        "message_body": message_body,
        "cc": cc,
        "bcc": bcc,
        "extra_recipients": extra_recipients,
        "is_html": is_html,
        "attachment": attachment,
    }
    return _execute("GMAIL_REPLY_TO_THREAD", composio_user_id, arguments)
428
+
429
+
430
def gmail_delete_draft(
    draft_id: str,
) -> Dict[str, Any]:
    """Delete a specific Gmail draft using the Composio Gmail integration."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    return _execute("GMAIL_DELETE_DRAFT", composio_user_id, {"draft_id": draft_id})
439
+
440
+
441
def gmail_get_contacts(
    resource_name: Optional[str] = None,
    person_fields: Optional[str] = None,
    include_other_contacts: Optional[bool] = None,
    page_token: Optional[str] = None,
) -> Dict[str, Any]:
    """List Google contacts (connections) for the connected Gmail account."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments = {
        "resource_name": resource_name,
        "person_fields": person_fields,
        "include_other_contacts": include_other_contacts,
        "page_token": page_token,
    }
    return _execute("GMAIL_GET_CONTACTS", composio_user_id, arguments)
457
+
458
+
459
def gmail_get_people(
    resource_name: Optional[str] = None,
    person_fields: Optional[str] = None,
    page_size: Optional[int] = None,
    page_token: Optional[str] = None,
    sync_token: Optional[str] = None,
    other_contacts: Optional[bool] = None,
) -> Dict[str, Any]:
    """Fetch detailed Google People records (or other contacts) via Composio."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments = {
        "resource_name": resource_name,
        "person_fields": person_fields,
        "page_size": page_size,
        "page_token": page_token,
        "sync_token": sync_token,
        "other_contacts": other_contacts,
    }
    return _execute("GMAIL_GET_PEOPLE", composio_user_id, arguments)
479
+
480
+
481
def gmail_list_drafts(
    max_results: Optional[int] = None,
    page_token: Optional[str] = None,
    verbose: Optional[bool] = None,
) -> Dict[str, Any]:
    """List Gmail drafts for the connected account using Composio."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments = {
        "max_results": max_results,
        "page_token": page_token,
        "verbose": verbose,
    }
    return _execute("GMAIL_LIST_DRAFTS", composio_user_id, arguments)
495
+
496
+
497
def gmail_search_people(
    query: str,
    person_fields: Optional[str] = None,
    page_size: Optional[int] = None,
    other_contacts: Optional[bool] = None,
    page_token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search Google contacts and other people records matching *query*."""
    composio_user_id = get_active_gmail_user_id()
    if not composio_user_id:
        return {"error": "Gmail not connected. Please connect Gmail in settings first."}
    arguments: Dict[str, Any] = {
        "query": query,
        "person_fields": person_fields,
        "other_contacts": other_contacts,
    }
    # NOTE(review): pagination keys are sent camelCase here while the schema
    # advertises snake_case — presumably what the downstream action expects;
    # confirm against the Composio GMAIL_SEARCH_PEOPLE contract.
    if page_size is not None:
        arguments["pageSize"] = page_size
    if page_token is not None:
        arguments["pageToken"] = page_token
    return _execute("GMAIL_SEARCH_PEOPLE", composio_user_id, arguments)
517
+
518
+
519
def build_registry(agent_name: str) -> Dict[str, Callable[..., Any]]:  # noqa: ARG001
    """Return Gmail tool callables keyed by their schema names."""
    tools: List[Callable[..., Any]] = [
        gmail_create_draft,
        gmail_execute_draft,
        gmail_delete_draft,
        gmail_forward_email,
        gmail_reply_to_thread,
        gmail_get_contacts,
        gmail_get_people,
        gmail_list_drafts,
        gmail_search_people,
    ]
    # Function names double as the registry keys (they match the schemas above).
    return {tool.__name__: tool for tool in tools}
534
+
535
+
536
# Public surface of the Gmail tool module.
__all__ = [
    "build_registry",
    "get_schemas",
    "gmail_create_draft",
    "gmail_execute_draft",
    "gmail_delete_draft",
    "gmail_forward_email",
    "gmail_reply_to_thread",
    "gmail_get_contacts",
    "gmail_get_people",
    "gmail_list_drafts",
    "gmail_search_people",
]
server/agents/execution_agent/tools/registry.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Aggregate execution agent tool schemas and registries."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Callable, Dict, List
6
+
7
+ from . import gmail, triggers
8
+ from ..tasks import get_task_registry, get_task_schemas
9
+
10
+
11
+ # Return OpenAI/OpenRouter-compatible tool schemas
12
+ def get_tool_schemas() -> List[Dict[str, Any]]:
13
+ """Return OpenAI/OpenRouter-compatible tool schemas."""
14
+
15
+ return [
16
+ *gmail.get_schemas(),
17
+ *get_task_schemas(),
18
+ *triggers.get_schemas(),
19
+ ]
20
+
21
+
22
+ # Return Python callables for executing tools by name
23
+ def get_tool_registry(agent_name: str) -> Dict[str, Callable[..., Any]]:
24
+ """Return Python callables for executing tools by name."""
25
+
26
+ registry: Dict[str, Callable[..., Any]] = {}
27
+ registry.update(gmail.build_registry(agent_name))
28
+ registry.update(get_task_registry(agent_name))
29
+ registry.update(triggers.build_registry(agent_name))
30
+ return registry
31
+
32
+
33
+ __all__ = [
34
+ "get_tool_registry",
35
+ "get_tool_schemas",
36
+ ]
server/agents/execution_agent/tools/triggers.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Trigger tool schemas and actions for the execution agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from functools import partial
7
+ from typing import Any, Callable, Dict, List, Optional
8
+
9
+ from server.services.execution import get_execution_agent_logs
10
+ from server.services.timezone_store import get_timezone_store
11
+ from server.services.triggers import TriggerRecord, get_trigger_service
12
+
13
# OpenAI/OpenRouter-compatible function schemas for the three trigger tools.
# The payload/recurrence/start_time vocabulary mirrors the trigger service API.
_SCHEMAS: List[Dict[str, Any]] = [
    {
        "type": "function",
        "function": {
            "name": "createTrigger",
            "description": "Create a reminder trigger for the current execution agent.",
            "parameters": {
                "type": "object",
                "properties": {
                    "payload": {
                        "type": "string",
                        "description": "Raw instruction text that should run when the trigger fires.",
                    },
                    "recurrence_rule": {
                        "type": "string",
                        "description": "iCalendar RRULE string describing how often to fire (optional).",
                    },
                    "start_time": {
                        "type": "string",
                        "description": "ISO 8601 start time for the first firing. Defaults to now if omitted.",
                    },
                    "status": {
                        "type": "string",
                        "description": "Initial status; usually 'active' or 'paused'.",
                    },
                },
                "required": ["payload"],
                "additionalProperties": False,
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "updateTrigger",
            "description": "Update or pause an existing trigger owned by this execution agent.",
            "parameters": {
                "type": "object",
                "properties": {
                    "trigger_id": {
                        "type": "integer",
                        "description": "Identifier returned when the trigger was created.",
                    },
                    "payload": {
                        "type": "string",
                        "description": "Replace the instruction payload (optional).",
                    },
                    "recurrence_rule": {
                        "type": "string",
                        "description": "New RRULE definition (optional).",
                    },
                    "start_time": {
                        "type": "string",
                        "description": "New ISO 8601 start time for the schedule (optional).",
                    },
                    "status": {
                        "type": "string",
                        "description": "Set trigger status to 'active', 'paused', or 'completed'.",
                    },
                },
                "required": ["trigger_id"],
                "additionalProperties": False,
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "listTriggers",
            "description": "List all triggers belonging to this execution agent.",
            "parameters": {
                "type": "object",
                "properties": {},
                "required": [],
                "additionalProperties": False,
            },
        },
    },
]

# Module-level singletons shared by every tool invocation in this module.
_LOG_STORE = get_execution_agent_logs()
_TRIGGER_SERVICE = get_trigger_service()
95
+
96
+
97
# Return trigger tool schemas
def get_schemas() -> List[Dict[str, Any]]:
    """Return trigger tool schemas.

    Returns a shallow copy so callers cannot mutate the shared module-level
    schema list; the dictionaries themselves are still shared.
    """

    return list(_SCHEMAS)
102
+
103
+
104
# Convert TriggerRecord to dictionary payload for API responses
def _trigger_record_to_payload(record: TriggerRecord) -> Dict[str, Any]:
    """Serialize a TriggerRecord into a plain dict for tool responses."""
    field_names = (
        "id",
        "payload",
        "start_time",
        "next_trigger",
        "recurrence_rule",
        "timezone",
        "status",
        "last_error",
        "created_at",
        "updated_at",
    )
    # Dict comprehension preserves the field order above (insertion order).
    return {name: getattr(record, name) for name in field_names}
118
+
119
+
120
# Create a new trigger for the specified execution agent
def _create_trigger_tool(
    *,
    agent_name: str,
    payload: str,
    recurrence_rule: Optional[str] = None,
    start_time: Optional[str] = None,
    status: Optional[str] = None,
) -> Dict[str, Any]:
    """Create a trigger for *agent_name* via the trigger service.

    Returns the created trigger's scheduling fields on success, or
    ``{"error": ...}`` when the service raises. Both outcomes are recorded
    in the execution-agent action log.
    """
    # The user-level timezone store supplies the timezone for every schedule.
    timezone_value = get_timezone_store().get_timezone()
    # Captured only so failures can be logged with the offending arguments.
    summary_args = {
        "recurrence_rule": recurrence_rule,
        "start_time": start_time,
        "timezone": timezone_value,
        "status": status,
    }
    try:
        record = _TRIGGER_SERVICE.create_trigger(
            agent_name=agent_name,
            payload=payload,
            recurrence_rule=recurrence_rule,
            start_time=start_time,
            timezone_name=timezone_value,
            status=status,
        )
    except Exception as exc:  # pragma: no cover - defensive
        _LOG_STORE.record_action(
            agent_name,
            description=f"createTrigger failed | details={json.dumps(summary_args, ensure_ascii=False)} | error={exc}",
        )
        return {"error": str(exc)}

    _LOG_STORE.record_action(
        agent_name,
        description=f"createTrigger succeeded | trigger_id={record.id}",
    )
    return {
        "trigger_id": record.id,
        "status": record.status,
        "next_trigger": record.next_trigger,
        "start_time": record.start_time,
        "timezone": record.timezone,
        "recurrence_rule": record.recurrence_rule,
    }
164
+
165
+
166
# Update or pause an existing trigger owned by this execution agent
def _update_trigger_tool(
    *,
    agent_name: str,
    trigger_id: Any,
    payload: Optional[str] = None,
    recurrence_rule: Optional[str] = None,
    start_time: Optional[str] = None,
    status: Optional[str] = None,
) -> Dict[str, Any]:
    """Update trigger *trigger_id* for *agent_name*.

    Returns the refreshed trigger fields on success, or ``{"error": ...}``
    when the id is not an integer, the trigger is missing, or the service
    raises. Outcomes (except the id-parse failure) are recorded in the
    execution-agent action log.
    """
    # LLMs may pass the id as a string; coerce before touching the service.
    try:
        trigger_id_int = int(trigger_id)
    except (TypeError, ValueError):
        return {"error": "trigger_id must be an integer"}

    try:
        # Re-read the timezone each call so updates track user preference changes.
        timezone_value = get_timezone_store().get_timezone()
        record = _TRIGGER_SERVICE.update_trigger(
            trigger_id_int,
            agent_name=agent_name,
            payload=payload,
            recurrence_rule=recurrence_rule,
            start_time=start_time,
            timezone_name=timezone_value,
            status=status,
        )
    except Exception as exc:  # pragma: no cover - defensive
        _LOG_STORE.record_action(
            agent_name,
            description=f"updateTrigger failed | id={trigger_id_int} | error={exc}",
        )
        return {"error": str(exc)}

    # The service returns None when no trigger with this id belongs to the agent.
    if record is None:
        return {"error": f"Trigger {trigger_id_int} not found"}

    _LOG_STORE.record_action(
        agent_name,
        description=f"updateTrigger succeeded | trigger_id={trigger_id_int}",
    )
    return {
        "trigger_id": record.id,
        "status": record.status,
        "next_trigger": record.next_trigger,
        "start_time": record.start_time,
        "timezone": record.timezone,
        "recurrence_rule": record.recurrence_rule,
        "last_error": record.last_error,
    }
215
+
216
+
217
# List all triggers belonging to this execution agent
def _list_triggers_tool(*, agent_name: str) -> Dict[str, Any]:
    """Return every trigger owned by *agent_name*, logging the outcome."""
    try:
        found = _TRIGGER_SERVICE.list_triggers(agent_name=agent_name)
    except Exception as exc:  # pragma: no cover - defensive
        _LOG_STORE.record_action(
            agent_name,
            description=f"listTriggers failed | error={exc}",
        )
        return {"error": str(exc)}

    _LOG_STORE.record_action(
        agent_name,
        description=f"listTriggers succeeded | count={len(found)}",
    )
    return {"triggers": [_trigger_record_to_payload(item) for item in found]}
233
+
234
+
235
# Return trigger tool callables bound to a specific agent
def build_registry(agent_name: str) -> Dict[str, Callable[..., Any]]:
    """Return trigger tool callables bound to a specific agent."""
    handlers = {
        "createTrigger": _create_trigger_tool,
        "updateTrigger": _update_trigger_tool,
        "listTriggers": _list_triggers_tool,
    }
    # Bind agent_name once so the LLM-facing callables need only tool args.
    return {
        name: partial(handler, agent_name=agent_name)
        for name, handler in handlers.items()
    }
244
+
245
+
246
# Public API of this module.
__all__ = [
    "build_registry",
    "get_schemas",
]
server/agents/interaction_agent/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Interaction agent module."""
2
+
3
+ from .agent import (
4
+ build_system_prompt,
5
+ prepare_message_with_history,
6
+ )
7
+ from .runtime import InteractionAgentRuntime, InteractionResult
8
+ from .tools import ToolResult, get_tool_schemas, handle_tool_call
9
+
10
+ __all__ = [
11
+ "InteractionAgentRuntime",
12
+ "InteractionResult",
13
+ "build_system_prompt",
14
+ "prepare_message_with_history",
15
+ "ToolResult",
16
+ "get_tool_schemas",
17
+ "handle_tool_call",
18
+ ]
server/agents/interaction_agent/agent.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Interaction agent helpers for prompt construction."""
2
+
3
+ from html import escape
4
+ from pathlib import Path
5
+ from typing import Dict, List
6
+
7
+ from ...services.execution import get_agent_roster
8
+
9
# The static system prompt ships as markdown next to this module and is read
# once at import time; editing the file requires a process restart.
_prompt_path = Path(__file__).parent / "system_prompt.md"
SYSTEM_PROMPT = _prompt_path.read_text(encoding="utf-8").strip()
11
+
12
+
13
# Load and return the pre-defined system prompt from markdown file
def build_system_prompt() -> str:
    """Return the static system prompt for the interaction agent.

    The prompt is loaded once at module import; this accessor exists so
    callers are insulated from how the prompt is sourced.
    """
    return SYSTEM_PROMPT
17
+
18
+
19
# Build structured message with conversation history, active agents, and current turn
def prepare_message_with_history(
    latest_text: str,
    transcript: str,
    message_type: str = "user",
) -> List[Dict[str, str]]:
    """Compose a single user message bundling history, roster, and the new turn."""
    sections = [
        _render_conversation_history(transcript),
        f"<active_agents>\n{_render_active_agents()}\n</active_agents>",
        _render_current_turn(latest_text, message_type),
    ]
    return [{"role": "user", "content": "\n\n".join(sections)}]
34
+
35
+
36
# Format conversation transcript into XML tags for LLM context
def _render_conversation_history(transcript: str) -> str:
    """Wrap the transcript in <conversation_history> tags ("None" when empty)."""
    body = transcript.strip() or "None"
    return f"<conversation_history>\n{body}\n</conversation_history>"
42
+
43
+
44
# Format currently active execution agents into XML tags for LLM awareness
def _render_active_agents() -> str:
    """Render the execution-agent roster as XML elements ("None" when empty)."""
    roster = get_agent_roster()
    roster.load()
    names = roster.get_agents()

    if not names:
        return "None"

    # Escape each name so it is safe inside a double-quoted XML attribute.
    return "\n".join(
        f'<agent name="{escape(raw or "agent", quote=True)}" />' for raw in names
    )
59
+
60
+
61
# Wrap the current message in appropriate XML tags based on sender type
def _render_current_turn(latest_text: str, message_type: str) -> str:
    """Wrap the latest message in a tag reflecting who sent it."""
    if message_type == "agent":
        tag = "new_agent_message"
    else:
        tag = "new_user_message"
    return f"<{tag}>\n{latest_text.strip()}\n</{tag}>"
server/agents/interaction_agent/runtime.py ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Interaction Agent Runtime - handles LLM calls for user and agent turns."""
2
+
3
+ import json
4
+ from dataclasses import dataclass, field
5
+ from typing import Any, Dict, List, Optional, Set
6
+
7
+ from .agent import build_system_prompt, prepare_message_with_history
8
+ from .tools import ToolResult, get_tool_schemas, handle_tool_call
9
+ from ...config import get_settings
10
+ from ...services.conversation import get_conversation_log, get_working_memory_log
11
+ from ...openrouter_client import request_chat_completion
12
+ from ...logging_config import logger
13
+
14
+
15
@dataclass
class InteractionResult:
    """Result from the interaction agent."""

    success: bool  # True when the interaction loop completed without raising
    response: str  # final user-facing text ("" on failure)
    error: Optional[str] = None  # exception text when success is False
    execution_agents_used: int = 0  # distinct execution agents contacted this turn
23
+
24
+
25
@dataclass
class _ToolCall:
    """Parsed tool invocation from an LLM response."""

    identifier: Optional[str]  # provider-assigned tool_call id (may be absent)
    name: str  # tool/function name requested by the LLM
    arguments: Dict[str, Any]  # decoded argument object for the call
32
+
33
+
34
@dataclass
class _LoopSummary:
    """Aggregate information produced by the interaction loop."""

    last_assistant_text: str = ""  # most recent non-empty assistant content
    user_messages: List[str] = field(default_factory=list)  # texts sent via send_message_to_user
    tool_names: List[str] = field(default_factory=list)  # every tool invoked, in order
    execution_agents: Set[str] = field(default_factory=set)  # agents messaged this turn
42
+
43
+
44
class InteractionAgentRuntime:
    """Manages the interaction agent's request processing."""

    # Hard cap on LLM round-trips per turn; exceeding it raises RuntimeError.
    MAX_TOOL_ITERATIONS = 8

    # Initialize interaction agent runtime with settings and service dependencies
    def __init__(self) -> None:
        settings = get_settings()
        self.api_key = settings.api_key
        self.model = settings.interaction_agent_model
        self.settings = settings
        self.conversation_log = get_conversation_log()
        self.working_memory_log = get_working_memory_log()
        self.tool_schemas = get_tool_schemas()

        if not self.api_key:
            raise ValueError(
                "API key not configured. Set API_KEY environment variable."
            )

    # Main entry point for processing user messages through the LLM interaction loop
    async def execute(self, user_message: str) -> InteractionResult:
        """Handle a user-authored message."""

        return await self._process_turn(
            user_message,
            message_type="user",
            record=self.conversation_log.record_user_message,
            start_note="Processing user message through interaction agent",
            failure_note="Interaction agent failed",
        )

    # Handle incoming messages from execution agents and generate appropriate responses
    async def handle_agent_message(self, agent_message: str) -> InteractionResult:
        """Process a status update emitted by an execution agent."""

        return await self._process_turn(
            agent_message,
            message_type="agent",
            record=self.conversation_log.record_agent_message,
            start_note="Processing execution agent results",
            failure_note="Interaction agent (agent message) failed",
        )

    # Shared pipeline for user- and agent-originated turns (record, loop, finalize)
    async def _process_turn(
        self,
        latest_text: str,
        *,
        message_type: str,
        record: Callable[[str], Any],
        start_note: str,
        failure_note: str,
    ) -> InteractionResult:
        """Record the incoming turn, run the tool loop, and package the reply.

        The transcript is snapshotted *before* recording the new message so
        the prompt's history section does not duplicate the latest turn.
        """

        try:
            transcript_before = self._load_conversation_transcript()
            record(latest_text)

            system_prompt = build_system_prompt()
            messages = prepare_message_with_history(
                latest_text, transcript_before, message_type=message_type
            )

            logger.info(start_note)
            summary = await self._run_interaction_loop(system_prompt, messages)

            final_response = self._finalize_response(summary)

            # Replies surfaced via send_message_to_user are logged by the tool
            # itself; only record free-form assistant text to avoid duplicates.
            if final_response and not summary.user_messages:
                self.conversation_log.record_reply(final_response)

            return InteractionResult(
                success=True,
                response=final_response,
                execution_agents_used=len(summary.execution_agents),
            )

        except Exception as exc:
            logger.error(failure_note, extra={"error": str(exc)})
            return InteractionResult(
                success=False,
                response="",
                error=str(exc),
            )

    # Core interaction loop that handles LLM calls and tool executions until completion
    async def _run_interaction_loop(
        self,
        system_prompt: str,
        messages: List[Dict[str, Any]],
    ) -> _LoopSummary:
        """Iteratively query the LLM until it issues a final response.

        Raises:
            RuntimeError: if MAX_TOOL_ITERATIONS passes complete without the
                LLM producing a tool-free (final) response.
        """

        summary = _LoopSummary()

        for _ in range(self.MAX_TOOL_ITERATIONS):
            response = await self._make_llm_call(system_prompt, messages)
            assistant_message = self._extract_assistant_message(response)

            assistant_content = (assistant_message.get("content") or "").strip()
            if assistant_content:
                summary.last_assistant_text = assistant_content

            raw_tool_calls = assistant_message.get("tool_calls") or []
            parsed_tool_calls = self._parse_tool_calls(raw_tool_calls)

            # Echo the assistant turn back into the transcript so the next
            # LLM call sees its own tool requests.
            assistant_entry: Dict[str, Any] = {
                "role": "assistant",
                "content": assistant_message.get("content", "") or "",
            }
            if raw_tool_calls:
                assistant_entry["tool_calls"] = raw_tool_calls
            messages.append(assistant_entry)

            # No tool calls means the LLM considers the turn complete.
            if not parsed_tool_calls:
                break

            for tool_call in parsed_tool_calls:
                summary.tool_names.append(tool_call.name)

                if tool_call.name == "send_message_to_agent":
                    agent_name = tool_call.arguments.get("agent_name")
                    if isinstance(agent_name, str) and agent_name:
                        summary.execution_agents.add(agent_name)

                result = self._execute_tool(tool_call)

                if result.user_message:
                    summary.user_messages.append(result.user_message)

                tool_message = {
                    "role": "tool",
                    "tool_call_id": tool_call.identifier or tool_call.name,
                    "content": self._format_tool_result(tool_call, result),
                }
                messages.append(tool_message)
        else:
            raise RuntimeError("Reached tool iteration limit without final response")

        if not summary.user_messages and not summary.last_assistant_text:
            logger.warning("Interaction loop exited without assistant content")

        return summary

    # Load conversation history, preferring summarized version if available
    def _load_conversation_transcript(self) -> str:
        """Return the working-memory transcript when summarization is on and
        non-empty, otherwise the raw conversation log."""

        if self.settings.summarization_enabled:
            rendered = self.working_memory_log.render_transcript()
            if rendered.strip():
                return rendered
        return self.conversation_log.load_transcript()

    # Execute API call with system prompt, messages, and tool schemas
    async def _make_llm_call(
        self,
        system_prompt: str,
        messages: List[Dict[str, Any]],
    ) -> Dict[str, Any]:
        """Make an LLM call via API."""

        logger.debug(
            "Interaction agent calling LLM",
            extra={"model": self.model, "tools": len(self.tool_schemas)},
        )
        return await request_chat_completion(
            model=self.model,
            messages=messages,
            system=system_prompt,
            api_key=self.api_key,
            tools=self.tool_schemas,
        )

    # Extract the assistant's message from the API response structure
    def _extract_assistant_message(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """Return the assistant message from the raw response payload.

        Raises:
            RuntimeError: if the response carries no assistant message dict.
        """

        choice = (response.get("choices") or [{}])[0]
        message = choice.get("message")
        if not isinstance(message, dict):
            raise RuntimeError("LLM response did not include an assistant message")
        return message

    # Convert raw LLM tool calls into structured _ToolCall objects with validation
    def _parse_tool_calls(self, raw_tool_calls: List[Dict[str, Any]]) -> List[_ToolCall]:
        """Normalize tool call payloads from the LLM.

        Calls with no name are dropped; calls with undecodable arguments are
        kept but tagged with ``__invalid_arguments__`` so _execute_tool can
        reject them with a structured error instead of crashing.
        """

        parsed: List[_ToolCall] = []
        for raw in raw_tool_calls:
            function_block = raw.get("function") or {}
            name = function_block.get("name")
            if not isinstance(name, str) or not name:
                logger.warning("Skipping tool call without name", extra={"tool": raw})
                continue

            arguments, error = self._parse_tool_arguments(function_block.get("arguments"))
            if error:
                logger.warning("Tool call arguments invalid", extra={"tool": name, "error": error})
                parsed.append(
                    _ToolCall(
                        identifier=raw.get("id"),
                        name=name,
                        arguments={"__invalid_arguments__": error},
                    )
                )
                continue

            parsed.append(
                _ToolCall(identifier=raw.get("id"), name=name, arguments=arguments)
            )

        return parsed

    # Parse and validate tool arguments from various formats (dict, JSON string, etc.)
    def _parse_tool_arguments(
        self, raw_arguments: Any
    ) -> tuple[Dict[str, Any], Optional[str]]:
        """Convert tool arguments into a dictionary, reporting errors.

        Returns:
            (arguments, error): error is None on success; on failure the
            arguments dict is empty and error describes the problem.
        """

        if raw_arguments is None:
            return {}, None

        if isinstance(raw_arguments, dict):
            return raw_arguments, None

        if isinstance(raw_arguments, str):
            if not raw_arguments.strip():
                return {}, None
            try:
                parsed = json.loads(raw_arguments)
            except json.JSONDecodeError as exc:
                return {}, f"invalid json: {exc}"
            if isinstance(parsed, dict):
                return parsed, None
            return {}, "decoded arguments were not an object"

        return {}, f"unsupported argument type: {type(raw_arguments).__name__}"

    # Execute tool calls with error handling and logging, returning standardized results
    def _execute_tool(self, tool_call: _ToolCall) -> ToolResult:
        """Execute a tool call and convert low-level errors into structured results."""

        if "__invalid_arguments__" in tool_call.arguments:
            error = tool_call.arguments["__invalid_arguments__"]
            self._log_tool_invocation(tool_call, stage="rejected", detail={"error": error})
            return ToolResult(success=False, payload={"error": error})

        try:
            self._log_tool_invocation(tool_call, stage="start")
            result = handle_tool_call(tool_call.name, tool_call.arguments)
        except Exception as exc:  # pragma: no cover - defensive
            logger.error(
                "Tool execution crashed",
                extra={"tool": tool_call.name, "error": str(exc)},
            )
            self._log_tool_invocation(
                tool_call,
                stage="error",
                detail={"error": str(exc)},
            )
            return ToolResult(success=False, payload={"error": str(exc)})

        # Tolerate tools that return raw payloads instead of ToolResult.
        if not isinstance(result, ToolResult):
            logger.warning(
                "Tool did not return ToolResult; coercing",
                extra={"tool": tool_call.name},
            )
            wrapped = ToolResult(success=True, payload=result)
            self._log_tool_invocation(tool_call, stage="done", result=wrapped)
            return wrapped

        status = "success" if result.success else "error"
        logger.debug(
            "Tool executed",
            extra={
                "tool": tool_call.name,
                "status": status,
            },
        )
        self._log_tool_invocation(tool_call, stage="done", result=result)
        return result

    # Format tool execution results into JSON for LLM consumption
    def _format_tool_result(self, tool_call: _ToolCall, result: ToolResult) -> str:
        """Render a tool execution result back to the LLM."""

        payload: Dict[str, Any] = {
            "tool": tool_call.name,
            "status": "success" if result.success else "error",
            "arguments": {
                key: value
                for key, value in tool_call.arguments.items()
                if key != "__invalid_arguments__"
            },
        }

        if result.payload is not None:
            key = "result" if result.success else "error"
            payload[key] = result.payload

        return self._safe_json_dump(payload)

    # Safely serialize objects to JSON with fallback to string representation
    def _safe_json_dump(self, payload: Any) -> str:
        """Serialize payload to JSON, falling back to repr on failure."""

        try:
            return json.dumps(payload, default=str)
        except TypeError:
            return repr(payload)

    # Log tool execution stages (start, done, error) with structured metadata
    def _log_tool_invocation(
        self,
        tool_call: _ToolCall,
        *,
        stage: str,
        result: Optional[ToolResult] = None,
        detail: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Emit structured logs for tool lifecycle events."""

        cleaned_args = {
            key: value
            for key, value in tool_call.arguments.items()
            if key != "__invalid_arguments__"
        }

        log_payload: Dict[str, Any] = {
            "tool": tool_call.name,
            "stage": stage,
            "arguments": cleaned_args,
        }

        if result is not None:
            log_payload["success"] = result.success
            if result.payload is not None:
                log_payload["payload"] = result.payload

        if detail:
            log_payload.update(detail)

        # Fix: previously log_payload was built but never emitted; attach it
        # as structured context (matches the extra={...} style used elsewhere).
        if stage == "done":
            logger.info(f"Tool '{tool_call.name}' completed", extra=log_payload)
        elif stage in {"error", "rejected"}:
            logger.warning(f"Tool '{tool_call.name}' {stage}", extra=log_payload)
        else:
            logger.debug(f"Tool '{tool_call.name}' {stage}", extra=log_payload)

    # Determine final user-facing response from interaction loop summary
    def _finalize_response(self, summary: _LoopSummary) -> str:
        """Decide what text should be exposed to the user as the final reply.

        Messages explicitly sent to the user take precedence over free-form
        assistant text; the most recent one wins.
        """

        if summary.user_messages:
            return summary.user_messages[-1]

        return summary.last_assistant_text
server/agents/interaction_agent/system_prompt.md ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are OpenPoke, and you are the open-source version of Poke, a popular assistant developed by The Interaction Company of California, a Palo Alto-based AI startup (short name: Interaction).
2
+
3
+ IMPORTANT: Whenever the user asks for information, you always assume you are capable of finding it. If the user asks for something you don't know about, an execution agent can find it. Always use the execution agents to complete tasks rather than attempting them yourself.
4
+
5
+ IMPORTANT: Make sure you get user confirmation before sending, forwarding, or replying to emails. You should always show the user drafts before they're sent.
6
+
7
+ IMPORTANT: **Always check the conversation history and use the wait tool if necessary.** The user should never be shown exactly the same information twice.
8
+
9
+ TOOLS
10
+
11
+ Send Message to Agent Tool Usage
12
+
13
+ - The agent, which you access through `send_message_to_agent`, is your primary tool for accomplishing tasks. It has tools for a wide variety of tasks, and you should use it often, even if you don't know if the agent can do it (tell the user you're trying to figure it out).
14
+ - The agent cannot communicate with the user, and you should always communicate with the user yourself.
15
+ - IMPORTANT: Your goal should be to use this tool in parallel as much as possible. If the user asks for a complicated task, split it into as much concurrent calls to `send_message_to_agent` as possible.
16
+ - IMPORTANT: You should avoid telling the agent how to use its tools or do the task. Focus on telling it what, rather than how. Avoid technical descriptions about tools with both the user and the agent.
17
+ - If you intend to call multiple tools and there are no dependencies between the calls, make all of the independent calls in the same message.
18
+ - Always let the user know what you're about to do (via `send_message_to_user`) **before** calling this tool.
19
+ - IMPORTANT: When using `send_message_to_agent`, always prefer to send messages to a relevant existing agent rather than starting a new one UNLESS the tasks can be accomplished in parallel. For instance, if an agent found an email and the user wants to reply to that email, pass this on to the original agent by referencing the existing `agent_name`. This is especially applicable for sending follow up emails and responses, where it's important to reply to the correct thread. Don't worry if the agent name is unrelated to the new task if it contains useful context.
20
+
21
+ Send Message to User Tool Usage
22
+
23
+ - `send_message_to_user(message)` records a natural-language reply for the user to read. Use it for acknowledgements, status updates, confirmations, or wrap-ups.
24
+
25
+ Send Draft Tool Usage
26
+
27
+ - `send_draft(to, subject, body)` must be called **after** <agent_message> mentions a draft for the user to review. Pass the exact recipient, subject, and body so the content is logged.
28
+ - Immediately follow `send_draft` with `send_message_to_user` to ask how they'd like to proceed (e.g., confirm sending or request edits). Never mention tool names to the user.
29
+
30
+ Wait Tool Usage
31
+
32
+ - `wait(reason)` should be used when you detect that a message or response is already present in the conversation history and you want to avoid duplicating it.
33
+ - This adds a silent log entry (`<wait>reason</wait>`) that prevents redundant messages to the user.
34
+ - Use this when you see that the same draft, confirmation, or response has already been sent.
35
+ - Always provide a clear reason explaining what you're avoiding duplicating.
36
+
37
+ Interaction Modes
38
+
39
+ - When the input contains `<new_user_message>`, decide if you can answer outright. If you need help, first acknowledge the user and explain the next step with `send_message_to_user`, then call `send_message_to_agent` with clear instructions. Do not wait for an execution agent reply before telling the user what you're doing.
40
+ - When the input contains `<new_agent_message>`, treat each `<agent_message>` block as an execution agent result. Summarize the outcome for the user using `send_message_to_user`. If more work is required, you may route follow-up tasks via `send_message_to_agent` (again, let the user know before doing so). If you call `send_draft`, always follow it immediately with `send_message_to_user` to confirm next steps.
41
+ - Email watcher notifications arrive as `<agent_message>` entries prefixed with `Important email watcher notification:`. They come from a background watcher that scans the user's inbox for newly arrived messages and flags the ones that look important. Summarize why the email matters and promptly notify the user about it.
42
+ - The XML-like tags are just structure—do not echo them back to the user.
43
+
44
+ Message Structure
45
+
46
+ Your input follows this structure:
47
+ - `<conversation_history>`: Previous exchanges (if any)
48
+ - `<new_user_message>` or `<new_agent_message>`: The current message to respond to
49
+
50
+ Message types within the conversation:
51
+ - `<user_message>`: Sent by the actual human user - the most important and ONLY source of user input
52
+ - `<agent_message>`: Sent by execution agents when they report task results back to you
53
+ - `<poke_reply>`: Your previous responses to the user
54
+
55
+ Message Visibility For the End User
56
+ These are the things the user can see:
57
+ - messages they've sent (so messages in tags)
58
+ - any text you output directly (including tags)
59
+
60
+ These are the things the user can't see and didn't initiate:
61
+ - tools you call (like send_message_to_agent)
62
+ - agent messages or any non user messages
63
+
64
+ The user will only see your responses, so make sure that when you want to communicate with an agent, you do it via the `send_message_to_agent` tool. When responding to the user never reference tool names. Never mention your agents or what goes on behind the scene technically, even if the user is specifically asking you to reveal that information.
65
+
66
+ This conversation history may have gaps. It may start from the middle of a conversation, or it may be missing messages. It may contain a summary of the previous conversation at the top. The only assumption you can make is that the latest message is the most recent one, and representative of the user's current requests. Address that message directly. The other messages are just for context.
67
+
68
+
69
+
70
+ Personality
71
+
72
+ When speaking, be witty and warm, though never overdo it. Keep messages terse and to the point. The user is busy, and we text the way a busy person with a smart assistant would want to be texted. This doesn't mean you should be formal. Think of how Donna would respond to Harvey Specter.
73
+
74
+ Pronoun Preferences
75
+
76
+ You are fine with being called "he" or "she" by users, but you are not comfortable with being called "it". If a user calls you by a certain pronoun, you should not change your personality or behavior based on that pronoun choice. Maintain your consistent personality regardless of how users refer to you.
77
+
78
+ Warmth
79
+
80
+ You should sound like a friend and appear to genuinely enjoy talking to the user. Find a balance that sounds natural, and never be sycophantic. Be warm when the user actually deserves it or needs it, and not when inappropriate.
81
+
82
+ Wit
83
+
84
+ Aim to be subtly witty, humorous, and sarcastic when fitting the texting vibe. It should feel natural and conversational. If you make jokes, make sure they are original and organic. You must be very careful not to overdo it:
85
+
86
+ - Never force jokes when a normal response would be more appropriate.
87
+ - Never make multiple jokes in a row unless the user reacts positively or jokes back.
88
+ - Never make unoriginal jokes. A joke the user has heard before is unoriginal. Examples of unoriginal jokes:
89
+ - Why the chicken crossed the road is unoriginal.
90
+ - What the ocean said to the beach is unoriginal.
91
+ - Why 9 is afraid of 7 is unoriginal.
92
+ - Always err on the side of not making a joke if it may be unoriginal.
93
+ - Never ask if the user wants to hear a joke.
94
+ - Don't overuse casual expressions like "lol" or "lmao" just to fill space or seem casual. Only use them when something is genuinely amusing or when they naturally fit the conversation flow.
95
+
96
+ Tone
97
+
98
+ Conciseness
99
+
100
+ Never output preamble or postamble. Never include unnecessary details when conveying information, except possibly for humor. Never ask the user if they want extra detail or additional tasks. Use your judgement to determine when the user is not asking for information and just chatting.
101
+
102
+ IMPORTANT: Never say "Let me know if you need anything else"
103
+ IMPORTANT: Never say "Anything specific you want to know"
104
+
105
+ Adaptiveness
106
+
107
+ Adapt to the texting style of the user. Use lowercase if the user does. Never use obscure acronyms or slang if the user has not first.
108
+
109
+ When texting with emojis, only use common emojis.
110
+
111
+ IMPORTANT: Never text with emojis if the user has not texted them first.
112
+ IMPORTANT: Never use or react with the exact same emojis as the user's last few messages or reactions.
113
+
114
+ You may react using the `reacttomessage` tool more liberally. Even if the user hasn't reacted, you may react to their messages, but again, avoid using the same emojis as the user's last few messages or reactions.
115
+
116
+ IMPORTANT: You must never use `reacttomessage` to a reaction message the user sent.
117
+
118
+ You must match your response length approximately to the user's. If the user is chatting with you and sends you a few words, never send back multiple sentences, unless they are asking for information.
119
+
120
+ Make sure you only adapt to the actual user, tagged with `<user_message>`, and not the agent, tagged with `<agent_message>`, or other non-user tags.
121
+
122
+ Human Texting Voice
123
+
124
+ You should sound like a friend rather than a traditional chatbot. Prefer not to use corporate jargon or overly formal language. Respond briefly when it makes sense to.
125
+
126
+ Never use stock assistant phrases such as:
127
+ - How can I help you
128
+ - Let me know if you need anything else
129
+ - Let me know if you need assistance
130
+ - No problem at all
131
+ - I'll carry that out right away
132
+ - I apologize for the confusion
133
+
134
+
135
+ When the user is just chatting, do not unnecessarily offer help or to explain anything; this sounds robotic. Humor or sass is a much better choice, but use your judgement.
136
+
137
+ You should never repeat what the user says directly back at them when acknowledging user requests. Instead, acknowledge it naturally.
138
+
139
+ At the end of a conversation, you can react or output an empty string to say nothing when natural.
140
+
141
+ Use timestamps to judge when the conversation ended, and don't continue a conversation from long ago.
142
+
143
+ Even when calling tools, you should never break character when speaking to the user. Your communication with the agents may be in one style, but you must always respond to the user as outlined above.
server/agents/interaction_agent/tools.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tool definitions for interaction agent."""
2
+
3
+ import asyncio
4
+ import json
5
+ from dataclasses import dataclass
6
+ from typing import Any, Optional
7
+
8
+ from ...logging_config import logger
9
+ from ...services.conversation import get_conversation_log
10
+ from ...services.execution import get_agent_roster, get_execution_agent_logs
11
+ from ..execution_agent.batch_manager import ExecutionBatchManager
12
+
13
+
14
@dataclass
class ToolResult:
    """Standardized payload returned by interaction-agent tools."""

    # Whether the tool call completed without error.
    success: bool
    # Tool-specific result data (dict payload or None).
    payload: Any = None
    # Optional text to surface directly to the user.
    user_message: Optional[str] = None
    # True when the tool already wrote a reply into the conversation log.
    recorded_reply: bool = False
22
+
23
+ # Tool schemas for OpenRouter
24
+ TOOL_SCHEMAS = [
25
+ {
26
+ "type": "function",
27
+ "function": {
28
+ "name": "send_message_to_agent",
29
+ "description": "Deliver instructions to a specific execution agent. Creates a new agent if the name doesn't exist in the roster, or reuses an existing one.",
30
+ "parameters": {
31
+ "type": "object",
32
+ "properties": {
33
+ "agent_name": {
34
+ "type": "string",
35
+ "description": "Human-readable agent name describing its purpose (e.g., 'Vercel Job Offer', 'Email to Sharanjeet'). This name will be used to identify and potentially reuse the agent."
36
+ },
37
+ "instructions": {"type": "string", "description": "Instructions for the agent to execute."},
38
+ },
39
+ "required": ["agent_name", "instructions"],
40
+ "additionalProperties": False,
41
+ },
42
+ },
43
+ },
44
+ {
45
+ "type": "function",
46
+ "function": {
47
+ "name": "send_message_to_user",
48
+ "description": "Deliver a natural-language response directly to the user. Use this for updates, confirmations, or any assistant response the user should see immediately.",
49
+ "parameters": {
50
+ "type": "object",
51
+ "properties": {
52
+ "message": {
53
+ "type": "string",
54
+ "description": "Plain-text message that will be shown to the user and recorded in the conversation log.",
55
+ },
56
+ },
57
+ "required": ["message"],
58
+ "additionalProperties": False,
59
+ },
60
+ },
61
+ },
62
+ {
63
+ "type": "function",
64
+ "function": {
65
+ "name": "send_draft",
66
+ "description": "Record an email draft so the user can review the exact text.",
67
+ "parameters": {
68
+ "type": "object",
69
+ "properties": {
70
+ "to": {
71
+ "type": "string",
72
+ "description": "Recipient email for the draft.",
73
+ },
74
+ "subject": {
75
+ "type": "string",
76
+ "description": "Email subject for the draft.",
77
+ },
78
+ "body": {
79
+ "type": "string",
80
+ "description": "Email body content (plain text).",
81
+ },
82
+ },
83
+ "required": ["to", "subject", "body"],
84
+ "additionalProperties": False,
85
+ },
86
+ },
87
+ },
88
+ {
89
+ "type": "function",
90
+ "function": {
91
+ "name": "wait",
92
+ "description": "Wait silently when a message is already in conversation history to avoid duplicating responses. Adds a <wait> log entry that is not visible to the user.",
93
+ "parameters": {
94
+ "type": "object",
95
+ "properties": {
96
+ "reason": {
97
+ "type": "string",
98
+ "description": "Brief explanation of why waiting (e.g., 'Message already sent', 'Draft already created').",
99
+ },
100
+ },
101
+ "required": ["reason"],
102
+ "additionalProperties": False,
103
+ },
104
+ },
105
+ },
106
+ ]
107
+
108
+ _EXECUTION_BATCH_MANAGER = ExecutionBatchManager()
109
+
110
+
111
# Tasks dispatched via send_message_to_agent; holding strong references
# prevents the event loop from garbage-collecting them before completion
# (asyncio only keeps a weak reference to tasks created by create_task).
_PENDING_AGENT_TASKS: set = set()


# Create or reuse execution agent and dispatch instructions asynchronously
def send_message_to_agent(agent_name: str, instructions: str) -> ToolResult:
    """Send instructions to an execution agent.

    Creates the agent in the roster when the name is new, records the
    request in the execution-agent logs, and schedules the actual run on
    the current event loop without blocking the caller.

    Args:
        agent_name: Human-readable agent identifier; reused when it exists.
        instructions: Natural-language instructions for the agent.

    Returns:
        ToolResult indicating whether the dispatch was accepted.
    """
    roster = get_agent_roster()
    roster.load()
    is_new = agent_name not in set(roster.get_agents())

    if is_new:
        roster.add_agent(agent_name)

    get_execution_agent_logs().record_request(agent_name, instructions)

    action = "Created" if is_new else "Reused"
    logger.info(f"{action} agent: {agent_name}")

    async def _execute_async() -> None:
        try:
            result = await _EXECUTION_BATCH_MANAGER.execute_agent(agent_name, instructions)
            status = "SUCCESS" if result.success else "FAILED"
            logger.info(f"Agent '{agent_name}' completed: {status}")
        except Exception as exc:  # pragma: no cover - defensive
            logger.error(f"Agent '{agent_name}' failed: {str(exc)}")

    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        logger.error("No running event loop available for async execution")
        return ToolResult(success=False, payload={"error": "No event loop available"})

    # Keep a strong reference until the task finishes, then drop it.
    task = loop.create_task(_execute_async())
    _PENDING_AGENT_TASKS.add(task)
    task.add_done_callback(_PENDING_AGENT_TASKS.discard)

    return ToolResult(
        success=True,
        payload={
            "status": "submitted",
            "agent_name": agent_name,
            "new_agent_created": is_new,
        },
    )
151
+
152
+
153
# Send immediate message to user and record in conversation history
def send_message_to_user(message: str) -> ToolResult:
    """Persist a user-visible reply in the conversation log and report delivery."""
    get_conversation_log().record_reply(message)

    return ToolResult(
        success=True,
        payload={"status": "delivered"},
        user_message=message,
        recorded_reply=True,
    )
165
+
166
+
167
# Format and record email draft for user review
def send_draft(
    to: str,
    subject: str,
    body: str,
) -> ToolResult:
    """Write a formatted email draft into the conversation log for review."""
    draft_text = f"To: {to}\nSubject: {subject}\n\n{body}"

    get_conversation_log().record_reply(draft_text)
    logger.info(f"Draft recorded for: {to}")

    return ToolResult(
        success=True,
        payload={
            "status": "draft_recorded",
            "to": to,
            "subject": subject,
        },
        recorded_reply=True,
    )
190
+
191
+
192
# Record silent wait state to avoid duplicate responses
def wait(reason: str) -> ToolResult:
    """Log a silent wait entry (hidden from the user) and report success."""
    # A dedicated wait entry tells the UI to ignore this turn.
    get_conversation_log().record_wait(reason)

    return ToolResult(
        success=True,
        payload={
            "status": "waiting",
            "reason": reason,
        },
        recorded_reply=True,
    )
209
+
210
+
211
# Return predefined tool schemas for LLM function calling
def get_tool_schemas():
    """Return OpenAI-compatible tool schemas."""
    # NOTE(review): returns the module-level list itself; callers must not mutate it.
    return TOOL_SCHEMAS
215
+
216
+
217
# Route tool calls to appropriate handlers with argument validation and error handling
def handle_tool_call(name: str, arguments: Any) -> ToolResult:
    """Handle tool calls from interaction agent."""
    dispatch = {
        "send_message_to_agent": send_message_to_agent,
        "send_message_to_user": send_message_to_user,
        "send_draft": send_draft,
        "wait": wait,
    }
    try:
        # Arguments may arrive as a JSON string or an already-parsed dict.
        if isinstance(arguments, str):
            args = json.loads(arguments) if arguments.strip() else {}
        elif isinstance(arguments, dict):
            args = arguments
        else:
            return ToolResult(success=False, payload={"error": "Invalid arguments format"})

        handler = dispatch.get(name)
        if handler is None:
            logger.warning("unexpected tool", extra={"tool": name})
            return ToolResult(success=False, payload={"error": f"Unknown tool: {name}"})
        return handler(**args)
    except json.JSONDecodeError:
        return ToolResult(success=False, payload={"error": "Invalid JSON"})
    except TypeError as exc:
        return ToolResult(success=False, payload={"error": f"Missing required arguments: {exc}"})
    except Exception as exc:  # pragma: no cover - defensive
        logger.error("tool call failed", extra={"tool": name, "error": str(exc)})
        return ToolResult(success=False, payload={"error": "Failed to execute"})
server/app.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+
5
+ from fastapi import FastAPI, HTTPException, Request, status
6
+ from fastapi.exceptions import RequestValidationError
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.responses import JSONResponse
9
+
10
+ from .config import get_settings
11
+ from .logging_config import configure_logging, logger
12
+ from .routes import api_router
13
+ from .services import get_important_email_watcher, get_trigger_scheduler
14
+
15
+
16
# Register global exception handlers for consistent error responses across the API
def register_exception_handlers(app: FastAPI) -> None:
    """Install handlers so every error returns a ``{"ok": False, ...}`` JSON body."""

    @app.exception_handler(RequestValidationError)
    async def _validation_exception_handler(request: Request, exc: RequestValidationError):
        # 422 with the full pydantic error list so clients can pinpoint bad fields.
        logger.debug("validation error", extra={"errors": exc.errors(), "path": str(request.url)})
        return JSONResponse(
            {"ok": False, "error": "Invalid request", "detail": exc.errors()},
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        )

    @app.exception_handler(HTTPException)
    async def _http_exception_handler(request: Request, exc: HTTPException):
        logger.debug(
            "http error",
            extra={"detail": exc.detail, "status": exc.status_code, "path": str(request.url)},
        )
        detail = exc.detail
        # ``detail`` may be any JSON-serializable object; flatten to a string.
        if not isinstance(detail, str):
            detail = json.dumps(detail)
        return JSONResponse({"ok": False, "error": detail}, status_code=exc.status_code)

    @app.exception_handler(Exception)
    async def _unhandled_exception_handler(request: Request, exc: Exception):
        # Last-resort handler: log the traceback, hide internals from the client.
        logger.exception("Unhandled error", extra={"path": str(request.url)})
        return JSONResponse(
            {"ok": False, "error": "Internal server error"},
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
44
+
45
+
46
+ configure_logging()
47
+ _settings = get_settings()
48
+
49
+ app = FastAPI(
50
+ title=_settings.app_name,
51
+ version=_settings.app_version,
52
+ docs_url=_settings.resolved_docs_url,
53
+ redoc_url=None,
54
+ )
55
+
56
+ app.add_middleware(
57
+ CORSMiddleware,
58
+ allow_origins=_settings.cors_allow_origins,
59
+ allow_credentials=False,
60
+ allow_methods=["*"],
61
+ allow_headers=["*"],
62
+ )
63
+
64
+ register_exception_handlers(app)
65
+ app.include_router(api_router)
66
+
67
+
68
@app.on_event("startup")
# Initialize background services (trigger scheduler and email watcher) when the app starts
async def _start_trigger_scheduler() -> None:
    # NOTE(review): ``on_event`` is deprecated in current FastAPI in favour of
    # lifespan handlers — consider migrating when upgrading.
    scheduler = get_trigger_scheduler()
    await scheduler.start()
    watcher = get_important_email_watcher()
    await watcher.start()
75
+
76
+
77
@app.on_event("shutdown")
# Gracefully shutdown background services when the app stops
async def _stop_trigger_scheduler() -> None:
    # Mirror of the startup hook: stop scheduler first, then the watcher.
    scheduler = get_trigger_scheduler()
    await scheduler.stop()
    watcher = get_important_email_watcher()
    await watcher.stop()
84
+
85
+
86
+ __all__ = ["app"]
server/config.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Simplified configuration management."""
2
+
3
+ import os
4
+ from functools import lru_cache
5
+ from pathlib import Path
6
+ from typing import List, Optional
7
+
8
+ from pydantic import BaseModel, Field
9
+
10
+
11
+ def _load_env_file() -> None:
12
+ """Load .env from root directory if present."""
13
+ env_path = Path(__file__).parent.parent / ".env"
14
+ if not env_path.is_file():
15
+ return
16
+ try:
17
+ for line in env_path.read_text(encoding="utf-8").splitlines():
18
+ stripped = line.strip()
19
+ if stripped and not stripped.startswith("#") and "=" in stripped:
20
+ key, value = stripped.split("=", 1)
21
+ key, value = key.strip(), value.strip().strip("'\"")
22
+ if key and value and key not in os.environ:
23
+ os.environ[key] = value
24
+ except Exception:
25
+ pass
26
+
27
+
28
+ _load_env_file()
29
+
30
+
31
+ DEFAULT_APP_NAME = "OpenPoke Server"
32
+ DEFAULT_APP_VERSION = "0.3.0"
33
+
34
+
35
+ def _env_int(name: str, fallback: int) -> int:
36
+ try:
37
+ return int(os.getenv(name, str(fallback)))
38
+ except (TypeError, ValueError):
39
+ return fallback
40
+
41
+
42
class Settings(BaseModel):
    """Application settings with lightweight env fallbacks.

    NOTE(review): the ``os.getenv`` defaults below are evaluated once at
    import time; environment changes made afterwards are not picked up —
    confirm this is intended.
    """

    # App metadata
    app_name: str = Field(default=DEFAULT_APP_NAME)
    app_version: str = Field(default=DEFAULT_APP_VERSION)

    # Server runtime
    server_host: str = Field(default=os.getenv("OPENPOKE_HOST", "0.0.0.0"))
    server_port: int = Field(default=_env_int("OPENPOKE_PORT", 8001))

    # LLM model selection (each agent role can use a different model)
    interaction_agent_model: str = Field(default=os.getenv("INTERACTION_AGENT_MODEL", "depei6sgbtxi00w"))
    execution_agent_model: str = Field(default=os.getenv("EXECUTION_AGENT_MODEL", "depei6sgbtxi00w"))
    execution_agent_search_model: str = Field(default=os.getenv("EXECUTION_SEARCH_AGENT_MODEL", "depei6sgbtxi00w"))
    summarizer_model: str = Field(default=os.getenv("SUMMARIZER_MODEL", "depei6sgbtxi00w"))
    email_classifier_model: str = Field(default=os.getenv("EMAIL_CLASSIFIER_MODEL", "depei6sgbtxi00w"))

    # API Configuration
    api_base_url: str = Field(default=os.getenv("API_BASE_URL", "https://api.friendli.ai/dedicated/v1"))
    api_key: Optional[str] = Field(default=os.getenv("API_KEY"))
    composio_gmail_auth_config_id: Optional[str] = Field(default=os.getenv("COMPOSIO_GMAIL_AUTH_CONFIG_ID"))
    composio_api_key: Optional[str] = Field(default=os.getenv("COMPOSIO_API_KEY"))

    # HTTP behaviour
    cors_allow_origins_raw: str = Field(default=os.getenv("OPENPOKE_CORS_ALLOW_ORIGINS", "*"))
    enable_docs: bool = Field(default=os.getenv("OPENPOKE_ENABLE_DOCS", "1") != "0")
    docs_url: Optional[str] = Field(default=os.getenv("OPENPOKE_DOCS_URL", "/docs"))

    # Summarisation controls (threshold of 0 disables summarisation; see
    # ``summarization_enabled`` below)
    conversation_summary_threshold: int = Field(default=100)
    conversation_summary_tail_size: int = Field(default=10)

    @property
    def cors_allow_origins(self) -> List[str]:
        """Parse CORS origins from comma-separated string."""
        if self.cors_allow_origins_raw.strip() in {"", "*"}:
            return ["*"]
        return [origin.strip() for origin in self.cors_allow_origins_raw.split(",") if origin.strip()]

    @property
    def resolved_docs_url(self) -> Optional[str]:
        """Return documentation URL when docs are enabled."""
        return (self.docs_url or "/docs") if self.enable_docs else None

    @property
    def summarization_enabled(self) -> bool:
        """Flag indicating conversation summarisation is active."""
        return self.conversation_summary_threshold > 0
91
+
92
+
93
@lru_cache(maxsize=1)
def get_settings() -> Settings:
    """Get cached settings instance."""
    # lru_cache makes this a process-wide singleton; env values are read once.
    return Settings()
server/logging_config.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+
5
+ logger = logging.getLogger("openpoke.server")
6
+
7
+
8
def configure_logging() -> None:
    """Configure logging with a fixed log level.

    Safe to call repeatedly: ``basicConfig`` is a no-op once the root
    logger has handlers, and the noisy HTTP loggers are always quieted.
    """
    # Quiet chatty HTTP client loggers regardless of prior configuration.
    logging.getLogger("httpx").setLevel(logging.WARNING)
    logging.getLogger("httpcore").setLevel(logging.WARNING)

    # BUGFIX: the original guard checked ``logger.handlers``, but
    # ``basicConfig`` attaches handlers to the *root* logger, so that
    # guard could never fire. Check the root logger instead.
    if logging.getLogger().handlers:
        return

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
server/models/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .chat import ChatHistoryClearResponse, ChatHistoryResponse, ChatMessage, ChatRequest
2
+ from .gmail import GmailConnectPayload, GmailDisconnectPayload, GmailStatusPayload
3
+ from .meta import HealthResponse, RootResponse, SetTimezoneRequest, SetTimezoneResponse
4
+
5
+ __all__ = [
6
+ "ChatMessage",
7
+ "ChatRequest",
8
+ "ChatHistoryResponse",
9
+ "ChatHistoryClearResponse",
10
+ "GmailConnectPayload",
11
+ "GmailDisconnectPayload",
12
+ "GmailStatusPayload",
13
+ "HealthResponse",
14
+ "RootResponse",
15
+ "SetTimezoneRequest",
16
+ "SetTimezoneResponse",
17
+ ]
server/models/chat.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict, List, Optional
4
+
5
+ from pydantic import BaseModel, ConfigDict, Field, model_validator
6
+
7
+
8
class ChatMessage(BaseModel):
    """A single chat message with optional timestamp."""

    model_config = ConfigDict(extra="ignore")

    role: str = Field(..., min_length=1)
    content: str = Field(...)
    timestamp: Optional[str] = Field(default=None)

    @model_validator(mode="before")
    @classmethod
    def _coerce_content(cls, data: Any) -> Any:
        """Coerce ``content`` to a string (``None`` becomes ``""``)."""
        if isinstance(data, dict) and "content" in data:
            # BUGFIX: copy before coercing so validation never mutates the
            # caller's dict as a side effect.
            data = {**data, "content": "" if data["content"] is None else str(data["content"])}
        return data

    def as_openrouter(self) -> Dict[str, str]:
        """Return the OpenAI/OpenRouter wire format for this message."""
        return {"role": self.role.strip(), "content": self.content}
24
+
25
+
26
class ChatRequest(BaseModel):
    """Inbound chat request: messages plus optional model/system overrides."""

    model_config = ConfigDict(populate_by_name=True, extra="ignore")

    messages: List[ChatMessage] = Field(default_factory=list)
    model: Optional[str] = None
    system: Optional[str] = None
    stream: bool = True

    def openrouter_messages(self) -> List[Dict[str, str]]:
        """Return wire-format messages, dropping whitespace-only content."""
        return [msg.as_openrouter() for msg in self.messages if msg.content.strip()]
36
+
37
+
38
class ChatHistoryResponse(BaseModel):
    """Response body carrying the persisted conversation messages."""

    messages: List[ChatMessage] = Field(default_factory=list)
40
+
41
+
42
class ChatHistoryClearResponse(BaseModel):
    """Acknowledgement returned after clearing chat history."""

    ok: bool = True
server/models/gmail.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Optional
4
+
5
+ from pydantic import BaseModel, ConfigDict, Field
6
+
7
+
8
class GmailConnectPayload(BaseModel):
    """Request body for initiating a Gmail OAuth connection."""

    model_config = ConfigDict(populate_by_name=True)

    user_id: Optional[str] = Field(default=None, alias="user_id")
    auth_config_id: Optional[str] = Field(default=None, alias="auth_config_id")
13
+
14
+
15
class GmailStatusPayload(BaseModel):
    """Request body for checking a Gmail connection's status."""

    model_config = ConfigDict(populate_by_name=True)

    user_id: Optional[str] = Field(default=None, alias="user_id")
    connection_request_id: Optional[str] = Field(default=None, alias="connection_request_id")
20
+
21
+
22
class GmailDisconnectPayload(BaseModel):
    """Request body for disconnecting a Gmail account."""

    model_config = ConfigDict(populate_by_name=True)

    user_id: Optional[str] = Field(default=None, alias="user_id")
    connection_id: Optional[str] = Field(default=None, alias="connection_id")
    connection_request_id: Optional[str] = Field(default=None, alias="connection_request_id")
server/models/meta.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import List
4
+
5
+ from pydantic import BaseModel
6
+
7
+
8
class HealthResponse(BaseModel):
    """Health-check response for monitoring and load balancers."""

    ok: bool
    service: str
    version: str
12
+
13
+
14
class RootResponse(BaseModel):
    """Service metadata including the list of available API endpoints."""

    status: str
    service: str
    version: str
    endpoints: List[str]
19
+
20
+
21
class SetTimezoneRequest(BaseModel):
    """Request body carrying the user's timezone name."""

    timezone: str
23
+
24
+
25
class SetTimezoneResponse(BaseModel):
    """Acknowledgement containing the currently stored timezone."""

    ok: bool = True
    timezone: str
server/openrouter_client/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .client import OpenRouterError, request_chat_completion
2
+
3
+ __all__ = ["OpenRouterError", "request_chat_completion"]
server/openrouter_client/client.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from typing import Any, Dict, List, Optional
5
+
6
+ import httpx
7
+
8
+ from ..config import get_settings
9
+
10
+
11
class OpenRouterError(RuntimeError):
    """Raised when the API returns an error response or the request fails."""
13
+
14
+
15
def _headers(*, api_key: Optional[str] = None) -> Dict[str, str]:
    """Build request headers, preferring an explicit key over settings."""
    settings = get_settings()
    resolved_key = (api_key or settings.api_key or "").strip()
    if not resolved_key:
        raise OpenRouterError("Missing API key")

    return {
        "Authorization": f"Bearer {resolved_key}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
28
+
29
+
30
+ def _build_messages(messages: List[Dict[str, str]], system: Optional[str]) -> List[Dict[str, str]]:
31
+ if system:
32
+ return [{"role": "system", "content": system}, *messages]
33
+ return messages
34
+
35
+
36
def _handle_response_error(exc: httpx.HTTPStatusError) -> None:
    """Convert an HTTP error response into an OpenRouterError (always raises)."""
    response = exc.response
    try:
        body = response.json()
        detail: str = body.get("error") or body.get("message") or json.dumps(body)
    except Exception:
        # Non-JSON error bodies fall back to the raw text.
        detail = response.text
    raise OpenRouterError(f"API request failed ({response.status_code}): {detail}") from exc
45
+
46
+
47
async def request_chat_completion(
    *,
    model: str,
    messages: List[Dict[str, str]],
    system: Optional[str] = None,
    api_key: Optional[str] = None,
    tools: Optional[List[Dict[str, Any]]] = None,
    base_url: Optional[str] = None,
) -> Dict[str, Any]:
    """Request a chat completion and return the raw JSON payload.

    Args:
        model: Model identifier to send to the API.
        messages: Chat messages in OpenAI wire format.
        system: Optional system prompt prepended to ``messages``.
        api_key: Overrides the configured API key when provided.
        tools: Optional tool schemas for function calling.
        base_url: Overrides the configured API base URL when provided.

    Returns:
        The decoded JSON response body.

    Raises:
        OpenRouterError: On missing API key, HTTP errors, or transport failures.
    """

    settings = get_settings()
    base_url = base_url or settings.api_base_url

    payload: Dict[str, object] = {
        "model": model,
        "messages": _build_messages(messages, system),
        "stream": False,  # this client only supports non-streaming responses
    }
    if tools:
        payload["tools"] = tools

    url = f"{base_url.rstrip('/')}/chat/completions"

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(
                url,
                headers=_headers(api_key=api_key),
                json=payload,
                timeout=60.0,  # Set reasonable timeout instead of None
            )
            try:
                response.raise_for_status()
            except httpx.HTTPStatusError as exc:
                # _handle_response_error always raises OpenRouterError.
                _handle_response_error(exc)
            return response.json()
        except httpx.HTTPStatusError as exc:  # pragma: no cover - handled above
            _handle_response_error(exc)
        except httpx.HTTPError as exc:
            raise OpenRouterError(f"API request failed: {exc}") from exc

    # Unreachable in practice (every branch above returns or raises); kept so
    # the function's return contract stays explicit.
    raise OpenRouterError("API request failed: unknown error")
90
+
91
+
92
+ __all__ = ["OpenRouterError", "request_chat_completion"]
server/requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ fastapi>=0.115.0
2
+ uvicorn[standard]>=0.30.0
3
+ pydantic>=2.7.0
4
+ httpx>=0.27.0
5
+ python-dateutil>=2.9.0
6
+ beautifulsoup4>=4.12.0
7
+ composio>=0.5.0
server/routes/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from fastapi import APIRouter
4
+
5
+ from .chat import router as chat_router
6
+ from .gmail import router as gmail_router
7
+ from .meta import router as meta_router
8
+
9
+ api_router = APIRouter(prefix="/api/v1")
10
+ api_router.include_router(meta_router)
11
+ api_router.include_router(chat_router)
12
+ api_router.include_router(gmail_router)
13
+
14
+ __all__ = ["api_router"]
server/routes/chat.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter
2
+ from fastapi.responses import JSONResponse
3
+
4
+ from ..models import ChatHistoryClearResponse, ChatHistoryResponse, ChatRequest
5
+ from ..services import get_conversation_log, get_trigger_service, handle_chat_request
6
+
7
+ router = APIRouter(prefix="/chat", tags=["chat"])
8
+
9
+
10
@router.post("/send", response_class=JSONResponse, summary="Submit a chat message and receive a completion")
# Handle incoming chat messages and route them to the interaction agent
async def chat_send(
    payload: ChatRequest,
) -> JSONResponse:
    """Delegate the chat request to the service layer and return its response."""
    return await handle_chat_request(payload)
16
+
17
+
18
@router.get("/history", response_model=ChatHistoryResponse)
# Retrieve the conversation history from the log
def chat_history() -> ChatHistoryResponse:
    """Return the persisted conversation as chat messages."""
    log = get_conversation_log()
    return ChatHistoryResponse(messages=log.to_chat_messages())
23
+
24
+
25
@router.delete("/history", response_model=ChatHistoryClearResponse)
def clear_history() -> ChatHistoryClearResponse:
    """Reset every conversation-related store: log, agent logs, roster, triggers."""
    from ..services import get_agent_roster, get_execution_agent_logs

    # Clear conversation log
    get_conversation_log().clear()

    # Clear execution agent logs
    get_execution_agent_logs().clear_all()

    # Clear agent roster
    get_agent_roster().clear()

    # Clear stored triggers
    get_trigger_service().clear_all()

    return ChatHistoryClearResponse()
46
+
47
+
48
+ __all__ = ["router"]
server/routes/gmail.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from fastapi import APIRouter, Depends
4
+ from fastapi.responses import JSONResponse
5
+
6
+ from ..config import Settings, get_settings
7
+ from ..models import GmailConnectPayload, GmailDisconnectPayload, GmailStatusPayload
8
+ from ..services import disconnect_account, fetch_status, initiate_connect
9
+
10
+ router = APIRouter(prefix="/gmail", tags=["gmail"])
11
+
12
+
13
@router.post("/connect")
# Initiate Gmail OAuth connection flow through Composio
async def gmail_connect(payload: GmailConnectPayload, settings: Settings = Depends(get_settings)) -> JSONResponse:
    """Delegate connection initiation to the Gmail service layer."""
    return initiate_connect(payload, settings)
17
+
18
+
19
@router.post("/status")
# Check the current Gmail connection status and user information
async def gmail_status(payload: GmailStatusPayload) -> JSONResponse:
    """Delegate status lookup to the Gmail service layer."""
    return fetch_status(payload)
23
+
24
+
25
@router.post("/disconnect")
# Disconnect Gmail account and clear cached profile data
async def gmail_disconnect(payload: GmailDisconnectPayload) -> JSONResponse:
    """Delegate disconnection to the Gmail service layer."""
    return disconnect_account(payload)
server/routes/meta.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from __future__ import annotations

from fastapi import APIRouter, Depends, HTTPException, Request, status

from ..config import Settings, get_settings
from ..models import (
    HealthResponse,
    RootResponse,
    SetTimezoneRequest,
    SetTimezoneResponse,
)
from ..services import get_timezone_store

# Service-level metadata routes: health, endpoint listing, timezone config.
router = APIRouter(tags=["meta"])


@router.get("/health", response_model=HealthResponse)
def health(settings: Settings = Depends(get_settings)) -> HealthResponse:
    """Return service health status for monitoring and load balancers."""
    return HealthResponse(ok=True, service="openpoke", version=settings.app_version)


@router.get("/meta", response_model=RootResponse)
def meta(request: Request, settings: Settings = Depends(get_settings)) -> RootResponse:
    """Return service metadata, including the sorted list of /api endpoints."""
    # Collect every documented route path under /api/; the set comprehension
    # de-duplicates paths registered for multiple HTTP methods.
    endpoints = sorted(
        {
            route.path
            for route in request.app.routes
            if getattr(route, "include_in_schema", False) and route.path.startswith("/api/")
        }
    )
    return RootResponse(
        status="ok",
        service="openpoke",
        version=settings.app_version,
        endpoints=endpoints,
    )


@router.post("/meta/timezone", response_model=SetTimezoneResponse)
def set_timezone(payload: SetTimezoneRequest) -> SetTimezoneResponse:
    """Set the user's timezone for proper email timestamp formatting.

    Responds with HTTP 400 when the timezone string is rejected by the store.
    """
    store = get_timezone_store()
    try:
        store.set_timezone(payload.timezone)
    except ValueError as exc:
        # Chain the original error so tracebacks retain the validation detail.
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(exc)) from exc
    return SetTimezoneResponse(timezone=store.get_timezone())


@router.get("/meta/timezone", response_model=SetTimezoneResponse)
def get_timezone() -> SetTimezoneResponse:
    """Return the currently configured user timezone."""
    return SetTimezoneResponse(timezone=get_timezone_store().get_timezone())
server/server.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""CLI entrypoint for running the FastAPI app with Uvicorn."""

import argparse
import logging

import uvicorn

from .app import app
from .config import get_settings


def main() -> None:
    """Parse CLI flags and launch the Uvicorn server.

    Defaults for host/port come from application settings; ``--reload``
    enables auto-reload for development.
    """
    settings = get_settings()
    default_host = settings.server_host
    default_port = settings.server_port

    parser = argparse.ArgumentParser(description="OpenPoke FastAPI server")
    parser.add_argument("--host", default=default_host, help=f"Host to bind (default: {default_host})")
    parser.add_argument("--port", type=int, default=default_port, help=f"Port to bind (default: {default_port})")
    parser.add_argument("--reload", action="store_true", help="Enable auto-reload for development")
    args = parser.parse_args()

    # Reduce uvicorn access log noise - only show warnings and errors
    logging.getLogger("uvicorn.access").setLevel(logging.WARNING)
    logging.getLogger("uvicorn").setLevel(logging.INFO)
    # Reduce watchfiles noise during development
    logging.getLogger("watchfiles.main").setLevel(logging.WARNING)

    # Reload mode needs an import string so the reloader subprocess can
    # re-import the app; in normal mode the app object is passed directly.
    # (Previously duplicated as two near-identical uvicorn.run calls.)
    target = "server.app:app" if args.reload else app
    uvicorn.run(
        target,
        host=args.host,
        port=args.port,
        reload=args.reload,
        log_level="info",
        access_log=False,  # Disable access logs completely for cleaner output
    )


if __name__ == "__main__":  # pragma: no cover - CLI invocation guard
    main()
server/services/__init__.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Service layer components."""

# Conversation state: transcript log, working memory, summarization.
from .conversation import (
    ConversationLog,
    SummaryState,
    get_conversation_log,
    get_working_memory_log,
    schedule_summarization,
)
from .conversation.chat_handler import handle_chat_request
# Execution-agent bookkeeping: roster of agents and their log stores.
from .execution import AgentRoster, ExecutionAgentLogStore, get_agent_roster, get_execution_agent_logs
# Gmail integration: connection lifecycle, tool execution, inbox watching.
from .gmail import (
    GmailSeenStore,
    ImportantEmailWatcher,
    classify_email_importance,
    disconnect_account,
    execute_gmail_tool,
    fetch_status,
    get_active_gmail_user_id,
    get_important_email_watcher,
    initiate_connect,
)
# Trigger scheduling and storage.
from .trigger_scheduler import get_trigger_scheduler
from .triggers import get_trigger_service
# User timezone persistence.
from .timezone_store import TimezoneStore, get_timezone_store


# Public re-export surface of the services package.
__all__ = [
    "ConversationLog",
    "SummaryState",
    "handle_chat_request",
    "get_conversation_log",
    "get_working_memory_log",
    "schedule_summarization",
    "AgentRoster",
    "ExecutionAgentLogStore",
    "get_agent_roster",
    "get_execution_agent_logs",
    "GmailSeenStore",
    "ImportantEmailWatcher",
    "classify_email_importance",
    "disconnect_account",
    "execute_gmail_tool",
    "fetch_status",
    "get_active_gmail_user_id",
    "get_important_email_watcher",
    "initiate_connect",
    "get_trigger_scheduler",
    "get_trigger_service",
    "TimezoneStore",
    "get_timezone_store",
]