wasmdashai committed
Commit 4b366ee · verified · Parent: 313105c

Update Index.html

Files changed (1): Index.html (+438, -0)

Index.html CHANGED
@@ -0,0 +1,438 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+ <meta charset="UTF-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <title>LAHJA AI - Voice Assistant</title>
+ <script src="https://cdn.tailwindcss.com"></script>
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
+ <style>
+ @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap');
+
+ body {
+ font-family: 'Poppins', sans-serif;
+ background: linear-gradient(135deg, #1a1a2e, #16213e);
+ color: #fff;
+ height: 100vh;
+ overflow: hidden;
+ }
+
+ .chat-container {
+ background: rgba(255, 255, 255, 0.1);
+ backdrop-filter: blur(10px);
+ border-radius: 20px;
+ box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
+ border: 1px solid rgba(255, 255, 255, 0.1);
+ }
+
+ .message-bubble {
+ max-width: 80%;
+ padding: 12px 18px;
+ border-radius: 20px;
+ margin-bottom: 12px;
+ position: relative;
+ animation: fadeIn 0.3s ease-out;
+ }
+
+ .user-message {
+ background: linear-gradient(135deg, #4e54c8, #8f94fb);
+ align-self: flex-end;
+ border-bottom-right-radius: 5px;
+ }
+
+ .ai-message {
+ background: rgba(255, 255, 255, 0.15);
+ align-self: flex-start;
+ border-bottom-left-radius: 5px;
+ }
+
+ .pulse {
+ animation: pulse 1.5s infinite;
+ }
+
+ @keyframes pulse {
+ 0% { transform: scale(1); }
+ 50% { transform: scale(1.05); }
+ 100% { transform: scale(1); }
+ }
+
+ @keyframes fadeIn {
+ from { opacity: 0; transform: translateY(10px); }
+ to { opacity: 1; transform: translateY(0); }
+ }
+
+ .waveform {
+ display: flex;
+ align-items: center;
+ height: 40px;
+ gap: 3px;
+ }
+
+ .waveform-bar {
+ background: rgba(255, 255, 255, 0.7);
+ width: 4px;
+ border-radius: 2px;
+ animation: equalize 1.5s infinite ease-in-out;
+ }
+
+ @keyframes equalize {
+ 0%, 100% { height: 10px; }
+ 50% { height: 20px; }
+ }
+
+ .waveform-bar:nth-child(1) { animation-delay: -0.9s; }
+ .waveform-bar:nth-child(2) { animation-delay: -0.7s; }
+ .waveform-bar:nth-child(3) { animation-delay: -0.5s; }
+ .waveform-bar:nth-child(4) { animation-delay: -0.3s; }
+ .waveform-bar:nth-child(5) { animation-delay: -0.1s; }
+
+ .typing-indicator {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ }
+
+ .typing-dot {
+ width: 8px;
+ height: 8px;
+ background: rgba(255, 255, 255, 0.7);
+ border-radius: 50%;
+ animation: typingAnimation 1.4s infinite ease-in-out;
+ }
+
+ .typing-dot:nth-child(1) { animation-delay: 0s; }
+ .typing-dot:nth-child(2) { animation-delay: 0.2s; }
+ .typing-dot:nth-child(3) { animation-delay: 0.4s; }
+
+ @keyframes typingAnimation {
+ 0%, 60%, 100% { transform: translateY(0); }
+ 30% { transform: translateY(-5px); }
+ }
+
+ .scrollbar-hide::-webkit-scrollbar {
+ display: none;
+ }
+
+ .scrollbar-hide {
+ -ms-overflow-style: none;
+ scrollbar-width: none;
+ }
+ </style>
+ </head>
+ <body class="flex items-center justify-center p-4">
+ <div class="chat-container w-full max-w-2xl h-[80vh] flex flex-col">
+ <!-- Header -->
+ <div class="p-4 border-b border-gray-700 flex items-center justify-between">
+ <div class="flex items-center gap-3">
+ <div class="w-10 h-10 rounded-full bg-gradient-to-r from-purple-500 to-blue-500 flex items-center justify-center">
+ <i class="fas fa-robot text-white"></i>
+ </div>
+ <div>
+ <h2 class="font-semibold">Voice Assistant</h2>
+ <p class="text-xs text-gray-300">Powered by ChatGPT</p>
+ </div>
+ </div>
+ <div class="flex items-center gap-2">
+ <button id="settings-btn" class="p-2 rounded-full hover:bg-gray-700 transition">
+ <i class="fas fa-cog text-gray-300"></i>
+ </button>
+ </div>
+ </div>
+
+ <!-- Chat Messages -->
+ <div id="chat-messages" class="flex-1 p-4 overflow-y-auto scrollbar-hide flex flex-col">
+ <!-- Initial welcome message -->
+ <div class="message-bubble ai-message">
+ <p>مرحباً! أنا مساعدك الصوتي الذكي. اضغط على زر الميكروفون لبدء المحادثة معي.</p>
+ </div>
+ </div>
+
+ <!-- Input Area -->
+ <div class="p-4 border-t border-gray-700">
+ <div class="flex items-center gap-2">
+ <button id="voice-btn" class="w-14 h-14 rounded-full bg-gradient-to-r from-purple-600 to-blue-500 flex items-center justify-center text-white hover:from-purple-700 hover:to-blue-600 transition-all shadow-lg pulse">
+ <i class="fas fa-microphone text-xl"></i>
+ </button>
+ <div class="flex-1 bg-gray-700 rounded-full px-4 py-2 flex items-center justify-between">
+ <p id="voice-status" class="text-sm text-gray-300">اضغط على الميكروفون للتحدث</p>
+ <div id="waveform" class="waveform hidden">
+ <div class="waveform-bar"></div>
+ <div class="waveform-bar"></div>
+ <div class="waveform-bar"></div>
+ <div class="waveform-bar"></div>
+ <div class="waveform-bar"></div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <!-- Settings Modal -->
+ <div id="settings-modal" class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center hidden z-50">
+ <div class="bg-gray-800 rounded-xl p-6 w-full max-w-md">
+ <div class="flex justify-between items-center mb-4">
+ <h3 class="text-xl font-semibold">الإعدادات</h3>
+ <button id="close-settings" class="text-gray-400 hover:text-white">
+ <i class="fas fa-times"></i>
+ </button>
+ </div>
+ <div class="space-y-4">
+ <div>
+ <label class="block text-sm font-medium mb-1">Voice Model</label>
+ <select id="voiceSelect" class="w-full bg-gray-700 border border-gray-600 rounded-lg px-3 py-2 text-white">
+ <option value="SA2">Najdi Arabic Haba v2</option>
+ <option value="us">American English</option>
+ <option value="SA1">Najdi Arabic Haba v1</option>
+ <option value="SA3">Najdi Arabic Ahmed v1</option>
+ </select>
+ </div>
+ <div>
+ <label class="block text-sm font-medium mb-1">نبرة الصوت</label>
+ <select class="w-full bg-gray-700 border border-gray-600 rounded-lg px-3 py-2 text-white">
+ <option>ذكر</option>
+ <option>أنثى</option>
+ </select>
+ </div>
+ <div>
+ <label class="block text-sm font-medium mb-1">سرعة الصوت</label>
+ <input type="range" min="0.5" max="2" step="0.1" value="1" class="w-full">
+ </div>
+ </div>
+ <div class="mt-6 flex justify-end gap-3">
+ <button id="save-settings" class="px-4 py-2 bg-blue-600 rounded-lg hover:bg-blue-700 transition">
+ حفظ الإعدادات
+ </button>
+ </div>
+ </div>
+ </div>
+
+ <script type="module">
+ import {Client} from "https://cdn.jsdelivr.net/npm/@gradio/client/dist/index.min.js";
+ // DOM Elements
+ const voiceBtn = document.getElementById('voice-btn');
+ const voiceStatus = document.getElementById('voice-status');
+ const waveform = document.getElementById('waveform');
+ const chatMessages = document.getElementById('chat-messages');
+ const settingsBtn = document.getElementById('settings-btn');
+ const settingsModal = document.getElementById('settings-modal');
+ const closeSettings = document.getElementById('close-settings');
+ const saveSettings = document.getElementById('save-settings');
+
+ // Speech recognition setup
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+ const recognition = new SpeechRecognition();
+ recognition.lang = 'ar-SA';
+ recognition.interimResults = false;
+ recognition.maxAlternatives = 1;
+
+ // Speech synthesis setup
+ const synth = window.speechSynthesis;
+
+ // VITS Model Integration
+ const voiceModels = {
+ 'us': 'wasmdashai/vits-en-v1',
+ 'SA1': 'wasmdashai/vits-ar-sa-huba-v1',
+ 'SA2': 'wasmdashai/vits-ar-sa-huba-v2',
+ 'SA3': 'wasmdashai/vits-ar-sa-A',
+ };
+
+ // State variables
+ let isListening = false;
+ let conversationHistory = [];
+
+ // Event Listeners
+ voiceBtn.addEventListener('click', toggleVoiceRecognition);
+ settingsBtn.addEventListener('click', () => settingsModal.classList.remove('hidden'));
+ closeSettings.addEventListener('click', () => settingsModal.classList.add('hidden'));
+ saveSettings.addEventListener('click', saveSettingsHandler);
+
+ // Functions
+ function toggleVoiceRecognition() {
+ if (isListening) {
+ stopListening();
+ } else {
+ startListening();
+ }
+ }
+
+ function startListening() {
+ isListening = true;
+ voiceBtn.classList.add('animate-pulse');
+ voiceBtn.innerHTML = '<i class="fas fa-stop text-xl"></i>';
+ voiceStatus.textContent = 'أنا أستمع لك...';
+ waveform.classList.remove('hidden');
+
+ try {
+ recognition.start();
+ } catch (e) {
+ console.error('Error starting recognition:', e);
+ stopListening();
+ }
+ }
+
+ function stopListening() {
+ isListening = false;
+ voiceBtn.classList.remove('animate-pulse');
+ voiceBtn.innerHTML = '<i class="fas fa-microphone text-xl"></i>';
+ voiceStatus.textContent = 'اضغط على الميكروفون للتحدث';
+ waveform.classList.add('hidden');
+
+ try {
+ recognition.stop();
+ } catch (e) {
+ console.error('Error stopping recognition:', e);
+ }
+ }
+
+ function saveSettingsHandler() {
+ // In a real app, you would save these settings to localStorage or a backend
+ settingsModal.classList.add('hidden');
+ showSystemMessage('تم حفظ الإعدادات بنجاح');
+ }
+
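+ // Illustrative sketch (not wired into the demo): one way to persist the settings
+ // mentioned in saveSettingsHandler() above, using localStorage. The helper name and
+ // the 'lahja-settings' storage key are assumptions, not part of the original app;
+ // only the voice-model select has an id, so only that value is saved here.
+ function persistSettingsSketch() {
+ const settings = {
+ voiceModel: document.getElementById('voiceSelect').value
+ };
+ localStorage.setItem('lahja-settings', JSON.stringify(settings));
+ }
+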
+ function showSystemMessage(text) {
+ const messageDiv = document.createElement('div');
+ messageDiv.className = 'text-center text-xs text-gray-400 my-2';
+ messageDiv.textContent = text;
+ chatMessages.appendChild(messageDiv);
+ chatMessages.scrollTop = chatMessages.scrollHeight;
+ }
+
+ function addUserMessage(text) {
+ const messageDiv = document.createElement('div');
+ messageDiv.className = 'message-bubble user-message';
+ messageDiv.innerHTML = `<p>${text}</p>`;
+ chatMessages.appendChild(messageDiv);
+ chatMessages.scrollTop = chatMessages.scrollHeight;
+ }
+
+ function addAiMessage(text) {
+ const messageDiv = document.createElement('div');
+ messageDiv.className = 'message-bubble ai-message';
+
+ // Add typing indicator temporarily
+ const typingDiv = document.createElement('div');
+ typingDiv.className = 'typing-indicator';
+ typingDiv.innerHTML = `
+ <div class="typing-dot"></div>
+ <div class="typing-dot"></div>
+ <div class="typing-dot"></div>
+ `;
+ messageDiv.appendChild(typingDiv);
+ chatMessages.appendChild(messageDiv);
+ chatMessages.scrollTop = chatMessages.scrollHeight;
+
+ // Simulate AI thinking delay
+ setTimeout(() => {
+ typingDiv.remove();
+ messageDiv.innerHTML = `<p>${text}</p>`;
+ speakResponse(text);
+ }, 1500);
+ }
+
+ async function speakResponse(text) {
+ const voiceSelect = document.getElementById('voiceSelect');
+ const voice = voiceSelect.value;
+
+ try {
+ const client = await Client.connect("wasmdashai/DemoLahja");
+ const result = await client.predict("/predict", {
+ text: text,
+ name_model: voiceModels[voice],
+ speaking_rate: 1.0
+ });
+
+ const audioUrl = result.data?.[0]?.url;
+ if (audioUrl) {
+ const audio = new Audio(audioUrl);
+ audio.play();
+ } else {
+ fallbackTTS(text);
+ }
+ } catch (err) {
+ console.error("VITS model error:", err);
+ fallbackTTS(text);
+ }
+ }
+
+ function fallbackTTS(text) {
+ if (synth.speaking) {
+ synth.cancel();
+ }
+
+ // Fall back to the browser's built-in speech synthesis. Look the select element up
+ // here: the voiceSelect constant declared inside speakResponse() is not in scope.
+ const voiceSelect = document.getElementById('voiceSelect');
+ const utterance = new SpeechSynthesisUtterance(text);
+ utterance.lang = voiceSelect.value === 'us' ? 'en-US' : 'ar-SA';
+ utterance.rate = 1.0;
+ synth.speak(utterance);
+ }
+
+ function processUserInput(text) {
+ addUserMessage(text);
+ conversationHistory.push({ role: 'user', content: text });
+
+ // In a real app, you would send this to your backend, which connects to the ChatGPT API.
+ // For this demo, we simulate a response (an illustrative backend sketch follows simulateChatGPTResponse() below).
+ simulateChatGPTResponse(text);
+ }
+
+ function simulateChatGPTResponse(userInput) {
+ // This is a simulation - in a real app, you would call the ChatGPT API
+ const responses = {
+ "مرحبا": "مرحباً بك! كيف يمكنني مساعدتك اليوم؟",
+ "كيف حالك": "أنا بخير، شكراً لسؤالك! كيف يمكنني مساعدتك؟",
+ "ما هو اسمك": "أنا مساعدك الذكي الذي يعمل بالذكاء الاصطناعي. يمكنك تسميتي كما تريد!",
+ "شكرا": "على الرحب والسعة! هل هناك أي شيء آخر تحتاج مساعدتي فيه؟",
+ "وداعا": "إلى اللقاء! لا تتردد في العودة إذا كنت بحاجة إلى أي مساعدة."
+ };
+
+ const defaultResponse = "أنا آسف، لم أفهم سؤالك بالكامل. هل يمكنك توضيح ذلك؟";
+
+ const response = responses[userInput.toLowerCase()] || defaultResponse;
+
+ setTimeout(() => {
+ addAiMessage(response);
+ conversationHistory.push({ role: 'assistant', content: response });
+ }, 2000);
+ }
+
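+ // Illustrative sketch (not wired into the demo): how processUserInput() might call
+ // a real backend instead of simulateChatGPTResponse(). The '/api/chat' endpoint and
+ // the { reply: "..." } response shape are assumptions; the original app ships no backend.
+ async function fetchAssistantReplySketch(userInput) {
+ const response = await fetch('/api/chat', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ messages: conversationHistory.concat({ role: 'user', content: userInput }) })
+ });
+ const data = await response.json();
+ return data.reply;
+ }
+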
+ // Recognition event handlers
+ recognition.onresult = (event) => {
+ const speechResult = event.results[0][0].transcript;
+ processUserInput(speechResult);
+ stopListening();
+ };
+
+ recognition.onerror = (event) => {
+ console.error('Speech recognition error', event.error);
+ stopListening();
+
+ if (event.error === 'not-allowed') {
+ showSystemMessage('يجب السماح باستخدام الميكروفون لتفعيل هذه الميزة');
+ } else {
+ showSystemMessage('حدث خطأ في التعرف على الصوت. يرجى المحاولة مرة أخرى');
+ }
+ };
+
+ recognition.onend = () => {
+ if (isListening) {
+ // If we're still supposed to be listening, restart recognition
+ setTimeout(() => {
+ try {
+ recognition.start();
+ } catch (e) {
+ console.error('Error restarting recognition:', e);
+ stopListening();
+ }
+ }, 500);
+ }
+ };
+
+ // Initialize voices when they become available
+ if (speechSynthesis.onvoiceschanged !== undefined) {
+ speechSynthesis.onvoiceschanged = () => {
+ // Voices are now loaded
+ };
+ }
+ </script>
+ </body>
+ </html>