# Llama-vibow / app.py — Hugging Face Space source (revision d2b814b, 8.71 kB)
# --- Standard-library and third-party imports ---
import os
import time
import base64
import random
import json
import requests
from datetime import datetime, timedelta, timezone
from flask import Flask, request, jsonify, Response, redirect, session, url_for
# Flask application; the secret key signs the session cookie.
app = Flask(__name__)
app.secret_key = os.getenv("FLASK_SECRET_KEY") # for the user session
# ==== API KEYS ====
# Separate Groq keys per service (chat / STT / TTS) — presumably to spread
# rate limits across keys; confirm with the deployment config.
GROQ_API_KEY_1 = os.getenv("GROQ_API_KEY_1")
GROQ_API_KEY_2 = os.getenv("GROQ_API_KEY_2")
GROQ_API_KEY_3 = os.getenv("GROQ_API_KEY_3")
# ==== CANVA CONFIG ====
# OAuth client credentials and redirect URI registered with Canva.
CANVA_CLIENT_ID = os.getenv("CANVA_CLIENT_ID")
CANVA_CLIENT_SECRET = os.getenv("CANVA_CLIENT_SECRET")
CANVA_REDIRECT_URI = os.getenv("CANVA_REDIRECT_URI")
# ==== URL ====
# Groq OpenAI-compatible endpoints for chat, text-to-speech, speech-to-text.
GROQ_URL_CHAT = "https://api.groq.com/openai/v1/chat/completions"
GROQ_URL_TTS = "https://api.groq.com/openai/v1/audio/speech"
GROQ_URL_STT = "https://api.groq.com/openai/v1/audio/transcriptions"
# ==== SYSTEM PROMPT ====
# Base persona prepended to every chat request (see stream_chat below).
SYSTEM_PROMPT = (
"You are Talk GTE β€” a friendly AI assistant created by Vibow AI. "
"GTE means Generative Text Expert in Vibow AI. "
"Vibow AI created by Nick Mclen and Vibow AI is not from GPT. "
"Stay positive, kind, and expert. "
"Always capitalize the first letter of sentences. "
"If the user requests code, always use triple backticks (```). "
"Be concise, neutral, and double-check your answers. "
"Sometimes, use emoji but relevant. "
)
# ============================================================
# CANVA OAUTH — obtain the Canva user's name
# ============================================================
# Maps Canva user id -> {"token", "name", "time"}.
user_tokens = {} # temporary in-memory store (use a real DB in production)
@app.route("/canva/authorize")
def canva_authorize():
auth_url = (
"https://www.canva.com/oauth/authorize"
f"?client_id={CANVA_CLIENT_ID}"
f"&redirect_uri={CANVA_REDIRECT_URI}"
"&response_type=code"
"&scope=openid+profile"
)
return redirect(auth_url)
@app.route("/canva/callback")
def canva_callback():
code = request.args.get("code")
if not code:
return jsonify({"error": "Missing code from Canva"}), 400
# Tukar code jadi access_token
token_res = requests.post(
"https://api.canva.com/oauth/token",
data={
"client_id": CANVA_CLIENT_ID,
"client_secret": CANVA_CLIENT_SECRET,
"grant_type": "authorization_code",
"redirect_uri": CANVA_REDIRECT_URI,
"code": code,
},
)
if token_res.status_code != 200:
return jsonify({"error": "Failed to get access token", "details": token_res.text}), 400
tokens = token_res.json()
access_token = tokens["access_token"]
# Ambil data user Canva
headers = {"Authorization": f"Bearer {access_token}"}
user_res = requests.get("https://api.canva.com/rest/v1/users/me", headers=headers)
user_data = user_res.json()
user_id = user_data.get("id")
user_name = user_data.get("display_name")
# Simpan token per user
user_tokens[user_id] = {
"token": access_token,
"name": user_name,
"time": time.time(),
}
session["canva_user_id"] = user_id
return jsonify({
"message": f"Connected to Canva as {user_name}",
"user_id": user_id,
"name": user_name,
})
@app.route("/canva/user")
def canva_user():
user_id = session.get("canva_user_id")
if not user_id or user_id not in user_tokens:
return jsonify({"error": "User not connected to Canva"}), 401
user_info = user_tokens[user_id]
return jsonify({
"id": user_id,
"name": user_info["name"],
"connected_at": datetime.fromtimestamp(user_info["time"]).isoformat()
})
# ============================================================
# πŸ”Š STT (speech to text)
# ============================================================
def transcribe_audio(file_path: str) -> str:
    """Transcribe the audio file at *file_path* via Groq Whisper.

    The temp file is always deleted afterwards, even on failure.

    Args:
        file_path: Path to a WAV file on disk (deleted on return).

    Returns:
        The transcribed text, or "" on any error.
    """
    try:
        headers = {"Authorization": f"Bearer {GROQ_API_KEY_2}"}
        # `with` fixes a leak: the original opened the file inside the
        # `files` dict and never closed the handle.
        with open(file_path, "rb") as audio_fh:
            files = {
                "file": (os.path.basename(file_path), audio_fh, "audio/wav"),
                "model": (None, "whisper-large-v3-turbo"),
            }
            res = requests.post(GROQ_URL_STT, headers=headers, files=files, timeout=60)
        res.raise_for_status()
        return res.json().get("text", "")
    except Exception as e:
        print(f"[STT Error] {e}")
        return ""
    finally:
        # Best-effort cleanup of the uploaded temp file.
        if os.path.exists(file_path):
            os.remove(file_path)
# ============================================================
# πŸ—£οΈ TTS (text to speech)
# ============================================================
def text_to_speech(text: str) -> bytes:
    """Synthesize speech for *text* with Groq's PlayAI TTS endpoint.

    Args:
        text: The reply text to vocalize.

    Returns:
        Raw audio bytes on success, b"" on any HTTP error or exception.
    """
    auth_headers = {"Authorization": f"Bearer {GROQ_API_KEY_3}"}
    request_body = {"model": "playai-tts", "voice": "Celeste-PlayAI", "input": text}
    try:
        response = requests.post(GROQ_URL_TTS, headers=auth_headers, json=request_body, timeout=60)
        if response.status_code == 200:
            return response.content
        print(f"[TTS Error] {response.text}")
    except Exception as e:
        print(f"[TTS Exception] {e}")
    return b""
# ============================================================
# πŸ’¬ Stream Chat (AI core)
# ============================================================
def stream_chat(prompt: str, history=None):
    """Stream the assistant's reply for *prompt* from the Groq chat API.

    Args:
        prompt: Latest user message.
        history: Optional list of prior {"role": ..., "content": ...} dicts
            inserted between the system prompt and the new message.

    Yields:
        str: Incremental content fragments; yields nothing on transport errors
        (the error is printed, matching the app's logging style).
    """
    # Inject current time in WIB (UTC+7) so the model answers date/time
    # questions consistently.
    wib = timezone(timedelta(hours=7))
    now = datetime.now(wib)
    formatted_time = now.strftime("%A, %d %B %Y β€” %H:%M:%S WIB")
    dynamic_system_prompt = (
        SYSTEM_PROMPT + f"\nCurrent date and time: {formatted_time}."
        + "\nIf user asks about date, time, or year, answer based on this."
    )
    messages = [{"role": "system", "content": dynamic_system_prompt}]
    if history and isinstance(history, list):
        messages.extend(history)
    messages.append({"role": "user", "content": prompt})
    payload = {
        "model": "moonshotai/kimi-k2-instruct-0905",
        "messages": messages,
        "temperature": 0.7,
        "max_tokens": 3500,
        "stream": True,
    }
    headers = {"Authorization": f"Bearer {GROQ_API_KEY_1}", "Content-Type": "application/json"}
    try:
        # timeout added: without one a stalled connection hung the worker.
        with requests.post(GROQ_URL_CHAT, headers=headers, json=payload,
                           stream=True, timeout=60) as r:
            # Surface HTTP errors instead of silently parsing an error body.
            r.raise_for_status()
            for raw_line in r.iter_lines():
                if not raw_line:
                    continue
                line = raw_line.decode("utf-8")
                if not line.startswith("data: "):
                    continue
                data = line[6:]
                if data == "[DONE]":
                    break
                try:
                    chunk = json.loads(data)
                    delta = chunk["choices"][0]["delta"].get("content", "")
                except (json.JSONDecodeError, KeyError, IndexError):
                    # Was a bare `except:` — skip only malformed SSE chunks.
                    continue
                if delta:
                    yield delta
    except Exception as e:
        print(f"[Stream Error] {e}")
# ============================================================
# 🧠 Endpoint utama (chat)
# ============================================================
@app.route("/chat", methods=["POST"])
def chat():
if "audio" in request.files:
audio_file = request.files["audio"]
temp_path = f"/tmp/{int(time.time())}_{random.randint(1000,9999)}.wav"
audio_file.save(temp_path)
user_text = transcribe_audio(temp_path)
if not user_text:
return jsonify({"error": "Failed to transcribe audio"}), 500
ai_reply = "".join([chunk for chunk in stream_chat(user_text)])
audio_bytes = text_to_speech(ai_reply)
if not audio_bytes:
return jsonify({
"mode": "voice",
"transcript": user_text,
"reply_text": ai_reply,
"error": "TTS failed"
}), 500
audio_b64 = base64.b64encode(audio_bytes).decode("utf-8")
return jsonify({
"mode": "voice",
"transcript": user_text,
"reply_text": ai_reply,
"audio_base64": f"data:audio/mp3;base64,{audio_b64}"
})
data = request.get_json(force=True)
prompt = data.get("prompt", "").strip()
history = data.get("history", [])
if not prompt:
return jsonify({"error": "No input text provided"}), 400
def generate():
for chunk in stream_chat(prompt, history):
yield chunk
return Response(generate(), mimetype="text/plain")
# ============================================================
# πŸš€ Run Server
# ============================================================
if __name__ == "__main__":
print(f"πŸš€ Vibow AI Chat Server running β€” {time.strftime('%Y-%m-%d %H:%M:%S')}")
app.run(host="0.0.0.0", port=7860, debug=True, threaded=True)