| """ | |
| Chat routes for OpenAI integration | |
| """ | |
| import logging | |
| from flask import Blueprint, request, jsonify, current_app | |
| from app.services.openai_service import OpenAIService | |
| logger = logging.getLogger(__name__) | |
| chat_bp = Blueprint('chat', __name__) | |


# The URL path and HTTP method in this decorator are assumptions; the view
# must be registered on the blueprint to be reachable.
@chat_bp.route('/chat', methods=['POST'])
def chat():
| """ | |
| Handle chat requests to OpenAI | |
| Expected JSON payload: | |
| { | |
| "message": "User message", | |
| "model": "gpt-3.5-turbo" (optional), | |
| "temperature": 0.7 (optional), | |
| "system_message": "System prompt" (optional) | |
| } | |
| Returns: | |
| { | |
| "reply": "AI response", | |
| "model": "model_used", | |
| "usage": {...} | |
| } | |
| """ | |
    try:
        # Validate request
        if not request.is_json:
            return jsonify({"error": "Request must be JSON"}), 400

        data = request.get_json()
        if not data:
            return jsonify({"error": "No JSON data provided"}), 400

        # Extract parameters
        user_message = data.get("message", "").strip()
        model = data.get("model")
        temperature = data.get("temperature")
        system_message = data.get("system_message")

        # Initialize OpenAI service
        openai_service = OpenAIService(current_app.config['OPENAI_API_KEY'])

        # Validate message
        is_valid, error_msg = openai_service.validate_message(user_message)
        if not is_valid:
            return jsonify({"error": error_msg}), 400

        logger.info(f"Processing chat request for message length: {len(user_message)}")

        # Generate response
        result = openai_service.chat_completion(
            message=user_message,
            model=model,
            temperature=temperature,
            system_message=system_message
        )

        return jsonify(result), 200

    except Exception as e:
        logger.error(f"Chat endpoint error: {str(e)}")
        return jsonify({
            "error": "Failed to process chat request",
            "message": str(e)
        }), 500
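

# Example request against this route (illustrative sketch; assumes the
# blueprint is mounted under an /api prefix and the app listens on port 5000,
# neither of which is defined in this module):
#
#   curl -X POST http://localhost:5000/api/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello!", "temperature": 0.7}'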


# The URL path and method in this decorator are assumptions; without a route
# decorator the view would never be registered on the blueprint.
@chat_bp.route('/models', methods=['GET'])
def available_models():
| """ | |
| Get list of available OpenAI models | |
| """ | |
| models = [ | |
| "gpt-3.5-turbo", | |
| "gpt-3.5-turbo-16k", | |
| "gpt-4", | |
| "gpt-4-turbo-preview" | |
| ] | |
| return jsonify({ | |
| "models": models, | |
| "default": "gpt-3.5-turbo" | |
| }), 200 |
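

# ---------------------------------------------------------------------------
# Minimal wiring sketch (illustrative only): one way this blueprint could be
# registered and exercised locally. The /api prefix, the environment-variable
# config loading, and running this module directly are all assumptions, not
# requirements of the routes above.
if __name__ == "__main__":
    import os

    from flask import Flask

    app = Flask(__name__)
    # chat() reads the key from current_app.config['OPENAI_API_KEY'].
    app.config["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "")
    app.register_blueprint(chat_bp, url_prefix="/api")

    with app.test_client() as client:
        # List the models exposed by available_models().
        print(client.get("/api/models").get_json())
        # Send a sample chat payload; a valid OpenAI key is needed for a 200.
        resp = client.post("/api/chat", json={"message": "Hello!"})
        print(resp.status_code, resp.get_json())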