File size: 2,432 Bytes
fc0acea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
"""
Chat routes for OpenAI integration
"""
import logging
from flask import Blueprint, request, jsonify, current_app
from app.services.openai_service import OpenAIService

logger = logging.getLogger(__name__)
chat_bp = Blueprint('chat', __name__)

@chat_bp.route("/chat", methods=["POST"])
def chat():
    """
    Handle chat requests to OpenAI

    Expected JSON payload:
    {
        "message": "User message",
        "model": "gpt-3.5-turbo" (optional),
        "temperature": 0.7 (optional),
        "system_message": "System prompt" (optional)
    }

    Returns:
    {
        "reply": "AI response",
        "model": "model_used",
        "usage": {...}
    }

    Responses: 200 on success, 400 on invalid input, 500 on any other failure.
    """
    try:
        # Validate the content type before trying to parse a body.
        if not request.is_json:
            return jsonify({"error": "Request must be JSON"}), 400

        data = request.get_json()
        if not data:
            return jsonify({"error": "No JSON data provided"}), 400

        # Reject a non-string "message" (e.g. a JSON number) with a 400
        # instead of letting .strip() raise AttributeError and surface
        # to the client as a 500.
        raw_message = data.get("message", "")
        if not isinstance(raw_message, str):
            return jsonify({"error": "Field 'message' must be a string"}), 400
        user_message = raw_message.strip()
        model = data.get("model")
        temperature = data.get("temperature")
        system_message = data.get("system_message")

        # Initialize OpenAI service with the app-configured API key.
        openai_service = OpenAIService(current_app.config['OPENAI_API_KEY'])

        # Delegate message validation (emptiness/limits) to the service layer.
        is_valid, error_msg = openai_service.validate_message(user_message)
        if not is_valid:
            return jsonify({"error": error_msg}), 400

        # Lazy %-args: the string is only formatted if INFO is enabled.
        logger.info("Processing chat request for message length: %d",
                    len(user_message))

        # Generate response
        result = openai_service.chat_completion(
            message=user_message,
            model=model,
            temperature=temperature,
            system_message=system_message
        )

        return jsonify(result), 200

    except Exception as e:
        # logger.exception records the full traceback, not just str(e).
        logger.exception("Chat endpoint error: %s", e)
        # NOTE(review): echoing str(e) can leak internal details to clients;
        # kept for backward compatibility — confirm whether callers rely on
        # this "message" field before tightening it.
        return jsonify({
            "error": "Failed to process chat request",
            "message": str(e)
        }), 500

@chat_bp.route("/chat/models", methods=["GET"])
def available_models():
    """
    Return the static list of OpenAI chat models this API accepts,
    along with the default model name.
    """
    payload = {
        "models": [
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-16k",
            "gpt-4",
            "gpt-4-turbo-preview",
        ],
        "default": "gpt-3.5-turbo",
    }
    return jsonify(payload), 200