from flask import Flask, request, jsonify
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login
import torch
import os
app = Flask(__name__)
# ✅ Securely fetch HF Token from environment (invisible to users)
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("HF_TOKEN is not set in environment variables!")
# 🔐 Authenticate
login(token=hf_token)
# 🔄 Load model from Hugging Face
model_id = "dexcommunity/dex"
print("🔄 Loading model...")
# ⚠️ Fast tokenizer disabled to avoid a tokenizer.json crash
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
print("✅ Model loaded!")
@app.route('/chat', methods=['POST'])
def chat():
    try:
        # silent=True tolerates a missing or non-JSON body instead of raising inside get_json()
        data = request.get_json(silent=True) or {}
        msg = data.get("message", "")
        if not msg:
            return jsonify({"error": "No message sent"}), 400

        prompt = f"User: {msg}\nDex:"
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        outputs = model.generate(
            **inputs,  # passes input_ids and attention_mask together, avoiding the missing-mask warning
            max_length=256,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id
        )
        text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the model's turn; everything before the last "Dex:" is the echoed prompt
        reply = text.split("Dex:")[-1].strip()
        return jsonify({"reply": reply})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=7860)
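# Example client call, as a sketch: assumes the server above is running locally
# on port 7860; the reply text is model-dependent and shown only as a shape.
#
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello Dex"}'
#
#   -> {"reply": "..."}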