Spaces:
Running
Running
Update index.html
Browse files — index.html (+45 −53)
index.html
CHANGED
|
@@ -232,63 +232,55 @@
|
|
| 232 |
animationId = setTimeout(updateVisualizer, 50);
|
| 233 |
}
|
| 234 |
|
| 235 |
-
/**
 * Loads the speech-to-text and text-to-speech pipelines in parallel,
 * boots the WebLLM engine with progress reporting, and finally enables
 * the UI. Failures are reported to both the console and the on-page log.
 */
async function initializePipelines() {
    try {
        addLog('System: Initializing pipelines...');

        // Kick off both Transformers.js pipelines concurrently.
        const sttReady = pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en', { quantized: true });
        const ttsReady = pipeline('text-to-speech', 'Xenova/mms-tts-eng', { quantized: true });
        [sttPipeline, ttsPipeline] = await Promise.all([sttReady, ttsReady]);

        // Surface model download/compile progress in the on-page log.
        const onProgress = (report) => addLog(`System: ${report.text}`);
        llmEngine = await webllm.CreateEngine(
            "TinyLlama-1.1B-Chat-v0.4-q4f16_1-1k",
            { initProgressCallback: onProgress }
        );

        addLog('System: Digital Human Voice Chat initialized with WebLLM. Click "Begin Call" to start.');
        startButton.disabled = false;
        loadingDiv.style.display = 'none';
    } catch (error) {
        console.error('Error initializing pipelines:', error);
        addLog(`System: Error initializing pipelines: ${error.message}`);
        loadingDiv.style.display = 'none';
    }
}
|
| 263 |
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
}
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 282 |
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
} catch (error) {
|
| 288 |
-
console.error('Error processing speech:', error);
|
| 289 |
-
addLog(`System: Error processing speech: ${error.message}`);
|
| 290 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 291 |
}
|
|
|
|
| 292 |
|
| 293 |
function addLog(message) {
|
| 294 |
const now = new Date();
|
|
|
|
| 232 |
animationId = setTimeout(updateVisualizer, 50);
|
| 233 |
}
|
| 234 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 235 |
|
| 236 |
+
/**
 * Loads the STT and TTS pipelines in parallel, then initializes the
 * WebLLM chat module, and enables the UI once everything is ready.
 * Failures are reported to both the console and the on-page log.
 */
async function initializePipelines() {
    try {
        addLog('System: Initializing pipelines...');
        [sttPipeline, ttsPipeline] = await Promise.all([
            pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en', { quantized: true }),
            pipeline('text-to-speech', 'Xenova/mms-tts-eng', { quantized: true })
        ]);

        addLog('System: Initializing WebLLM...');
        // FIX: ChatModule's constructor does not take a `model` option — the
        // model localId is the argument to reload(). Passing it to the
        // constructor was silently ignored, so reload() had no model to load.
        llmChat = new webllm.ChatModule();
        await llmChat.reload("TinyLlama-1.1B-Chat-v0.4-q4f16_1-1k");
        addLog('System: WebLLM initialized successfully.');

        addLog('System: Digital Human Voice Chat initialized. Click "Begin Call" to start.');
        startButton.disabled = false;
        loadingDiv.style.display = 'none';
    } catch (error) {
        console.error('Error initializing pipelines:', error);
        addLog(`System: Error initializing pipelines: ${error.message}`);
        loadingDiv.style.display = 'none';
    }
}
|
| 260 |
|
| 261 |
+
/**
 * Runs one voice-chat turn: transcribe the user's audio, generate a
 * reply with the LLM, then synthesize and play the reply aloud.
 * Errors are logged to the console and the on-page log.
 * @param {Float32Array|object} audio - Raw audio accepted by the STT pipeline.
 */
async function processSpeech(audio) {
    try {
        if (!sttPipeline || !ttsPipeline || !llmChat) {
            throw new Error('Pipelines not initialized');
        }

        const transcription = await sttPipeline(audio);
        addLog(`User: ${transcription.text}`);

        const reply = await llmChat.generate(transcription.text);
        const botResponse = reply.trim();
        addLog(`Bot: ${botResponse}`);

        isSpeaking = true;
        try {
            const speechOutput = await ttsPipeline(botResponse);
            await playAudio(speechOutput.audio);
        } finally {
            // FIX: clear the flag even when TTS or playback throws; the
            // original only reset it on success, so one failed turn left
            // the app permanently stuck in the "speaking" state.
            isSpeaking = false;
        }
    } catch (error) {
        console.error('Error processing speech:', error);
        addLog(`System: Error processing speech: ${error.message}`);
    }
}
|
| 284 |
|
| 285 |
function addLog(message) {
|
| 286 |
const now = new Date();
|