import os
from datetime import datetime

import openai
from dotenv import load_dotenv
from flask import Flask, request, jsonify
from pymongo import MongoClient, ReturnDocument

load_dotenv()

app = Flask(__name__)

# MongoDB connection
client = MongoClient(os.getenv("MONGODB_URI"))
db = client[os.getenv("MONGODB_DB_NAME")]

# OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

# Model groups and models
MODEL_GROUPS = {
    "GPT4_GROUP": ["gpt-4", "gpt-4-0125-preview", "gpt-4-0613", "gpt-4-1106-preview"],
    "GPT4_TURBO_GROUP": ["gpt-4-turbo", "gpt-4-turbo-preview"],
    "GPT35_GROUP": ["gpt-3.5-turbo", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-1106"],
}

MODELS = {
    "GPT4": MODEL_GROUPS["GPT4_GROUP"] + MODEL_GROUPS["GPT4_TURBO_GROUP"],
    "GPT35": MODEL_GROUPS["GPT35_GROUP"],
    "IMAGE": ["dall-e-2", "dall-e-3"],
}

# User tiers
TIERS = {
    1: {
        "models": MODELS["GPT35"],
        "limits": {"daily": 100, "GPT35_GROUP": 100},
    },
    2: {
        "models": MODELS["GPT35"] + MODELS["GPT4"],
        "limits": {"daily": 500, "GPT35_GROUP": 300, "GPT4_GROUP": 150, "GPT4_TURBO_GROUP": 50},
    },
    3: {
        "models": MODELS["GPT35"] + MODELS["GPT4"] + MODELS["IMAGE"],
        "limits": {"daily": 1000, "GPT35_GROUP": 500, "GPT4_GROUP": 300, "GPT4_TURBO_GROUP": 100,
                   "dall-e-2": 50, "dall-e-3": 25},
    },
}


def get_user_tier(api_key):
    """Resolve a user's tier from MongoDB, falling back to USER_n_API_KEY env vars."""
    user = db.users.find_one({"api_key": api_key})
    if user:
        return user["tier"]
    for i in range(1, 4):
        if os.getenv(f"USER_{i}_API_KEY") == api_key:
            return int(os.getenv(f"USER_{i}_TIER"))
    return None


def get_model_group(model):
    """Map a model name to its rate-limit group; image models fall through as their own key."""
    for group, models in MODEL_GROUPS.items():
        if model in models:
            return group
    return model


def check_rate_limit(api_key, tier, model):
    """Atomically increment today's usage counters and raise if any tier limit is exceeded."""
    today = datetime.now().date().isoformat()
    model_group = get_model_group(model)

    usage = db.usage.find_one_and_update(
        {"api_key": api_key, "date": today},
        {"$inc": {"daily": 1, model_group: 1}},
        upsert=True,
        return_document=ReturnDocument.AFTER,
    )

    tier_limits = TIERS[tier]["limits"]
    if usage["daily"] > tier_limits["daily"]:
        raise Exception("Daily rate limit exceeded")
    if usage.get(model_group, 0) > tier_limits.get(model_group, float("inf")):
        raise Exception(f"Rate limit for {model_group} exceeded")


@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    data = request.json
    api_key = request.headers.get('Authorization', '').replace('Bearer ', '')
    user_tier = get_user_tier(api_key)
    if not user_tier:
        return jsonify({"error": "Invalid API key"}), 401

    model = data.get('model')
    if model not in TIERS[user_tier]["models"]:
        return jsonify({"error": "Model not available for your tier"}), 403

    try:
        check_rate_limit(api_key, user_tier, model)
        response = openai.ChatCompletion.create(**data)
        return jsonify(response)
    except Exception as e:
        return jsonify({"error": str(e)}), 429


@app.route('/v1/images/generations', methods=['POST'])
def image_generations():
    data = request.json
    api_key = request.headers.get('Authorization', '').replace('Bearer ', '')
    user_tier = get_user_tier(api_key)
    if not user_tier:
        return jsonify({"error": "Invalid API key"}), 401

    model = data.get('model', 'dall-e-2')
    if model not in TIERS[user_tier]["models"]:
        return jsonify({"error": "Model not available for your tier"}), 403

    try:
        check_rate_limit(api_key, user_tier, model)
        response = openai.Image.create(**data)
        return jsonify(response)
    except Exception as e:
        return jsonify({"error": str(e)}), 429


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)
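
# --- Example usage (a sketch, assuming the server runs locally on port 7860 and
# that "<your-proxy-api-key>" is one of the keys known to db.users or the
# USER_n_API_KEY environment variables; it is not an OpenAI key) ---
#
#   curl http://localhost:7860/v1/chat/completions \
#     -H "Authorization: Bearer <your-proxy-api-key>" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'
#
# The request body is forwarded to OpenAI unchanged after the tier and
# rate-limit checks pass.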