Vibow committed on
Commit d2b814b · verified · 1 Parent(s): c1403db

Update app.py

Files changed (1)
  1. app.py +256 -170
app.py CHANGED
@@ -1,171 +1,257 @@
- import os
- import time
- import base64
- import random
- import json
- import requests
- from datetime import datetime, timedelta, timezone
- from flask import Flask, request, jsonify, Response
-
- app = Flask(__name__)
-
- # ==== API KEYS ====
- GROQ_API_KEY_1 = os.getenv("GROQ_API_KEY_1")  # Chat
- GROQ_API_KEY_2 = os.getenv("GROQ_API_KEY_2")  # STT
- GROQ_API_KEY_3 = os.getenv("GROQ_API_KEY_3")  # TTS
-
- # ==== URL ====
- GROQ_URL_CHAT = "https://api.groq.com/openai/v1/chat/completions"
- GROQ_URL_TTS = "https://api.groq.com/openai/v1/audio/speech"
- GROQ_URL_STT = "https://api.groq.com/openai/v1/audio/transcriptions"
-
- # ==== System Prompt (base) ====
- SYSTEM_PROMPT = (
-     "You are Talk GTE — a friendly AI assistant created by Vibow AI. "
-     "GTE means Generative Text Expert in Vibow AI. "
-     "Vibow AI created by Nick Mclen and Vibow ai not from GPT and else. "
-     "Stay positive, kind, and expert. "
-     "Always capitalize the first letter of sentences. "
-     "If the user requests code, always use triple backticks (```). "
-     "Be concise, neutral, and double-check your answers. "
-     "If user requests code, always give explanation plus code. "
-     "If user typo, ask question like 'Do you mean {correct answer}?' with an explanation. "
-     "Sometimes, use emoji but relevant. "
- )
-
- # ===== STT =====
- def transcribe_audio(file_path: str) -> str:
-     try:
-         headers = {"Authorization": f"Bearer {GROQ_API_KEY_2}"}
-         files = {
-             "file": (os.path.basename(file_path), open(file_path, "rb"), "audio/wav"),
-             "model": (None, "whisper-large-v3-turbo"),
-         }
-         res = requests.post(GROQ_URL_STT, headers=headers, files=files, timeout=60)
-         res.raise_for_status()
-         return res.json().get("text", "")
-     except Exception as e:
-         print(f"[STT Error] {e}")
-         return ""
-     finally:
-         if os.path.exists(file_path):
-             os.remove(file_path)
-
- # ===== TTS =====
- def text_to_speech(text: str) -> bytes:
-     try:
-         headers = {"Authorization": f"Bearer {GROQ_API_KEY_3}"}
-         data = {"model": "playai-tts", "voice": "Celeste-PlayAI", "input": text}
-         res = requests.post(GROQ_URL_TTS, headers=headers, json=data, timeout=60)
-         if res.status_code != 200:
-             print(f"[TTS Error] {res.text}")
-             return b""
-         return res.content
-     except Exception as e:
-         print(f"[TTS Exception] {e}")
-         return b""
-
- # ===== Stream Chat (with dynamic time) =====
- def stream_chat(prompt: str, history=None):
-     # Indonesia time zone (WIB)
-     wib = timezone(timedelta(hours=7))
-     now = datetime.now(wib)
-     formatted_time = now.strftime("%A, %d %B %Y — %H:%M:%S WIB")
-
-     # Build a dynamic system prompt that includes the current time
-     dynamic_system_prompt = (
-         SYSTEM_PROMPT
-         + f"\nCurrent date and time: {formatted_time}."
-         + "\nIf user asks about date, time, or year, answer based on this."
-     )
-
-     messages = [{"role": "system", "content": dynamic_system_prompt}]
-
-     # Add history from the frontend (if any)
-     if history and isinstance(history, list):
-         messages.extend(history)
-
-     # Append the new user prompt
-     messages.append({"role": "user", "content": prompt})
-
-     payload = {
-         "model": "moonshotai/kimi-k2-instruct-0905",
-         "messages": messages,
-         "temperature": 0.7,
-         "max_tokens": 3500,
-         "stream": True,
-     }
-
-     headers = {
-         "Authorization": f"Bearer {GROQ_API_KEY_1}",
-         "Content-Type": "application/json",
-     }
-
-     try:
-         with requests.post(GROQ_URL_CHAT, headers=headers, json=payload, stream=True) as r:
-             for line in r.iter_lines():
-                 if not line:
-                     continue
-                 line = line.decode("utf-8")
-                 if line.startswith("data: "):
-                     data = line[6:]
-                     if data == "[DONE]":
-                         break
-                     try:
-                         chunk = json.loads(data)
-                         delta = chunk["choices"][0]["delta"].get("content", "")
-                         if delta:
-                             yield delta
-                     except:
-                         continue
-     except Exception as e:
-         print(f"[Stream Error] {e}")
-
- # ===== Main endpoint =====
- @app.route("/chat", methods=["POST"])
- def chat():
-     # === Voice ===
-     if "audio" in request.files:
-         audio_file = request.files["audio"]
-         temp_path = f"/tmp/{int(time.time())}_{random.randint(1000,9999)}.wav"
-         audio_file.save(temp_path)
-
-         user_text = transcribe_audio(temp_path)
-         if not user_text:
-             return jsonify({"error": "Failed to transcribe audio"}), 500
-
-         ai_reply = "".join([chunk for chunk in stream_chat(user_text)])
-         audio_bytes = text_to_speech(ai_reply)
-         if not audio_bytes:
-             return jsonify({
-                 "mode": "voice",
-                 "transcript": user_text,
-                 "reply_text": ai_reply,
-                 "error": "TTS failed"
-             }), 500
-
-         audio_b64 = base64.b64encode(audio_bytes).decode("utf-8")
-         return jsonify({
-             "mode": "voice",
-             "transcript": user_text,
-             "reply_text": ai_reply,
-             "audio_base64": f"data:audio/mp3;base64,{audio_b64}"
-         })
-
-     # === Text ===
-     data = request.get_json(force=True)
-     prompt = data.get("prompt", "").strip()
-     history = data.get("history", [])  # ← the frontend sends context if it wants
-     if not prompt:
-         return jsonify({"error": "No input text provided"}), 400
-
-     def generate():
-         for chunk in stream_chat(prompt, history):
-             yield chunk
-
-     return Response(generate(), mimetype="text/plain")
-
- # ===== Run server =====
- if __name__ == "__main__":
-     print(f"🚀 Vibow AI Chat Server running — {time.strftime('%Y-%m-%d %H:%M:%S')}")
+ import os
+ import time
+ import base64
+ import random
+ import json
+ import requests
+ from datetime import datetime, timedelta, timezone
+ from flask import Flask, request, jsonify, Response, redirect, session, url_for
+
+ app = Flask(__name__)
+ app.secret_key = os.getenv("FLASK_SECRET_KEY")  # for the user session
+
+ # ==== API KEYS ====
+ GROQ_API_KEY_1 = os.getenv("GROQ_API_KEY_1")
+ GROQ_API_KEY_2 = os.getenv("GROQ_API_KEY_2")
+ GROQ_API_KEY_3 = os.getenv("GROQ_API_KEY_3")
+
+ # ==== CANVA CONFIG ====
+ CANVA_CLIENT_ID = os.getenv("CANVA_CLIENT_ID")
+ CANVA_CLIENT_SECRET = os.getenv("CANVA_CLIENT_SECRET")
+ CANVA_REDIRECT_URI = os.getenv("CANVA_REDIRECT_URI")
+
+ # ==== URL ====
+ GROQ_URL_CHAT = "https://api.groq.com/openai/v1/chat/completions"
+ GROQ_URL_TTS = "https://api.groq.com/openai/v1/audio/speech"
+ GROQ_URL_STT = "https://api.groq.com/openai/v1/audio/transcriptions"
+
+ # ==== SYSTEM PROMPT ====
+ SYSTEM_PROMPT = (
+     "You are Talk GTE — a friendly AI assistant created by Vibow AI. "
+     "GTE means Generative Text Expert in Vibow AI. "
+     "Vibow AI created by Nick Mclen and Vibow AI is not from GPT. "
+     "Stay positive, kind, and expert. "
+     "Always capitalize the first letter of sentences. "
+     "If the user requests code, always use triple backticks (```). "
+     "Be concise, neutral, and double-check your answers. "
+     "Sometimes, use emoji but relevant. "
+ )
+
+ # ============================================================
+ # 🎨 CANVA OAUTH — fetch the Canva user's name
+ # ============================================================
+
+ user_tokens = {}  # temporary in-memory store (use a DB in production)
+
+ @app.route("/canva/authorize")
+ def canva_authorize():
+     auth_url = (
+         "https://www.canva.com/oauth/authorize"
+         f"?client_id={CANVA_CLIENT_ID}"
+         f"&redirect_uri={CANVA_REDIRECT_URI}"
+         "&response_type=code"
+         "&scope=openid+profile"
+     )
+     return redirect(auth_url)
+
+
+ @app.route("/canva/callback")
+ def canva_callback():
+     code = request.args.get("code")
+     if not code:
+         return jsonify({"error": "Missing code from Canva"}), 400
+
+     # Exchange the code for an access_token
+     token_res = requests.post(
+         "https://api.canva.com/oauth/token",
+         data={
+             "client_id": CANVA_CLIENT_ID,
+             "client_secret": CANVA_CLIENT_SECRET,
+             "grant_type": "authorization_code",
+             "redirect_uri": CANVA_REDIRECT_URI,
+             "code": code,
+         },
+     )
+
+     if token_res.status_code != 200:
+         return jsonify({"error": "Failed to get access token", "details": token_res.text}), 400
+
+     tokens = token_res.json()
+     access_token = tokens["access_token"]
+
+     # Fetch the Canva user's profile
+     headers = {"Authorization": f"Bearer {access_token}"}
+     user_res = requests.get("https://api.canva.com/rest/v1/users/me", headers=headers)
+     user_data = user_res.json()
+
+     user_id = user_data.get("id")
+     user_name = user_data.get("display_name")
+
+     # Store the token per user
+     user_tokens[user_id] = {
+         "token": access_token,
+         "name": user_name,
+         "time": time.time(),
+     }
+
+     session["canva_user_id"] = user_id
+
+     return jsonify({
+         "message": f"Connected to Canva as {user_name}",
+         "user_id": user_id,
+         "name": user_name,
+     })
+
+
+ @app.route("/canva/user")
+ def canva_user():
+     user_id = session.get("canva_user_id")
+     if not user_id or user_id not in user_tokens:
+         return jsonify({"error": "User not connected to Canva"}), 401
+
+     user_info = user_tokens[user_id]
+     return jsonify({
+         "id": user_id,
+         "name": user_info["name"],
+         "connected_at": datetime.fromtimestamp(user_info["time"]).isoformat()
+     })
+
+
+ # ============================================================
+ # 🔊 STT (speech to text)
+ # ============================================================
+ def transcribe_audio(file_path: str) -> str:
+     try:
+         headers = {"Authorization": f"Bearer {GROQ_API_KEY_2}"}
+         files = {
+             "file": (os.path.basename(file_path), open(file_path, "rb"), "audio/wav"),
+             "model": (None, "whisper-large-v3-turbo"),
+         }
+         res = requests.post(GROQ_URL_STT, headers=headers, files=files, timeout=60)
+         res.raise_for_status()
+         return res.json().get("text", "")
+     except Exception as e:
+         print(f"[STT Error] {e}")
+         return ""
+     finally:
+         if os.path.exists(file_path):
+             os.remove(file_path)
+
+
+ # ============================================================
+ # 🗣️ TTS (text to speech)
+ # ============================================================
+ def text_to_speech(text: str) -> bytes:
+     try:
+         headers = {"Authorization": f"Bearer {GROQ_API_KEY_3}"}
+         data = {"model": "playai-tts", "voice": "Celeste-PlayAI", "input": text}
+         res = requests.post(GROQ_URL_TTS, headers=headers, json=data, timeout=60)
+         if res.status_code != 200:
+             print(f"[TTS Error] {res.text}")
+             return b""
+         return res.content
+     except Exception as e:
+         print(f"[TTS Exception] {e}")
+         return b""
+
+
+ # ============================================================
+ # 💬 Stream Chat (AI core)
+ # ============================================================
+ def stream_chat(prompt: str, history=None):
+     wib = timezone(timedelta(hours=7))
+     now = datetime.now(wib)
+     formatted_time = now.strftime("%A, %d %B %Y — %H:%M:%S WIB")
+
+     dynamic_system_prompt = (
+         SYSTEM_PROMPT + f"\nCurrent date and time: {formatted_time}."
+         + "\nIf user asks about date, time, or year, answer based on this."
+     )
+
+     messages = [{"role": "system", "content": dynamic_system_prompt}]
+     if history and isinstance(history, list):
+         messages.extend(history)
+     messages.append({"role": "user", "content": prompt})
+
+     payload = {
+         "model": "moonshotai/kimi-k2-instruct-0905",
+         "messages": messages,
+         "temperature": 0.7,
+         "max_tokens": 3500,
+         "stream": True,
+     }
+
+     headers = {"Authorization": f"Bearer {GROQ_API_KEY_1}", "Content-Type": "application/json"}
+
+     try:
+         with requests.post(GROQ_URL_CHAT, headers=headers, json=payload, stream=True) as r:
+             for line in r.iter_lines():
+                 if not line:
+                     continue
+                 line = line.decode("utf-8")
+                 if line.startswith("data: "):
+                     data = line[6:]
+                     if data == "[DONE]":
+                         break
+                     try:
+                         chunk = json.loads(data)
+                         delta = chunk["choices"][0]["delta"].get("content", "")
+                         if delta:
+                             yield delta
+                     except:
+                         continue
+     except Exception as e:
+         print(f"[Stream Error] {e}")
+
+
+ # ============================================================
+ # 🧠 Main endpoint (chat)
+ # ============================================================
+ @app.route("/chat", methods=["POST"])
+ def chat():
+     if "audio" in request.files:
+         audio_file = request.files["audio"]
+         temp_path = f"/tmp/{int(time.time())}_{random.randint(1000,9999)}.wav"
+         audio_file.save(temp_path)
+
+         user_text = transcribe_audio(temp_path)
+         if not user_text:
+             return jsonify({"error": "Failed to transcribe audio"}), 500
+
+         ai_reply = "".join([chunk for chunk in stream_chat(user_text)])
+         audio_bytes = text_to_speech(ai_reply)
+         if not audio_bytes:
+             return jsonify({
+                 "mode": "voice",
+                 "transcript": user_text,
+                 "reply_text": ai_reply,
+                 "error": "TTS failed"
+             }), 500
+
+         audio_b64 = base64.b64encode(audio_bytes).decode("utf-8")
+         return jsonify({
+             "mode": "voice",
+             "transcript": user_text,
+             "reply_text": ai_reply,
+             "audio_base64": f"data:audio/mp3;base64,{audio_b64}"
+         })
+
+     data = request.get_json(force=True)
+     prompt = data.get("prompt", "").strip()
+     history = data.get("history", [])
+     if not prompt:
+         return jsonify({"error": "No input text provided"}), 400
+
+     def generate():
+         for chunk in stream_chat(prompt, history):
+             yield chunk
+
+     return Response(generate(), mimetype="text/plain")
+
+
+ # ============================================================
+ # 🚀 Run Server
+ # ============================================================
+ if __name__ == "__main__":
+     print(f"🚀 Vibow AI Chat Server running — {time.strftime('%Y-%m-%d %H:%M:%S')}")
  app.run(host="0.0.0.0", port=7860, debug=True, threaded=True)
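
Note on configuration: beyond the three Groq keys, the new revision reads four additional environment variables (FLASK_SECRET_KEY for session signing and the three CANVA_* values for the OAuth flow). The snippet below is a small illustrative check, not part of the commit; the variable names are taken from the diff, the script itself is an assumption about how you might verify them before launching the server.

import os

REQUIRED_VARS = [
    "GROQ_API_KEY_1",    # chat completions
    "GROQ_API_KEY_2",    # speech-to-text
    "GROQ_API_KEY_3",    # text-to-speech
    "FLASK_SECRET_KEY",  # signs the session cookie used by the Canva flow
    "CANVA_CLIENT_ID",
    "CANVA_CLIENT_SECRET",
    "CANVA_REDIRECT_URI",
]

# Report any variables that are unset or empty before starting the app.
missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
print("All required environment variables are set.")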
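
The /chat contract is unchanged by this commit: a JSON body with prompt and optional history returns a streamed plain-text reply, while a multipart upload with an audio field returns JSON containing the transcript, the reply text, and a base64 audio data URL. The sketch below is an illustrative client, not part of the commit; it assumes the server is reachable at http://localhost:7860 (the address passed to app.run), and question.wav is a hypothetical local recording.

import base64
import requests

BASE_URL = "http://localhost:7860"  # assumption: local deployment of this app

# Text mode: POST {"prompt", "history"} and print the streamed plain-text reply.
with requests.post(
    f"{BASE_URL}/chat",
    json={"prompt": "Hello, who are you?", "history": []},
    stream=True,
    timeout=120,
) as resp:
    resp.raise_for_status()
    resp.encoding = "utf-8"
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)
print()

# Voice mode: upload a WAV file in the "audio" field; the response is JSON with
# the transcript, the reply text, and a base64 data URL of the spoken reply.
with open("question.wav", "rb") as f:  # hypothetical local recording
    resp = requests.post(f"{BASE_URL}/chat", files={"audio": f}, timeout=300)
resp.raise_for_status()
reply = resp.json()
print(reply["transcript"], "->", reply["reply_text"])
audio_b64 = reply["audio_base64"].split(",", 1)[1]  # drop the "data:audio/mp3;base64," prefix
with open("reply.mp3", "wb") as out:
    out.write(base64.b64decode(audio_b64))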