Update app.py
app.py
CHANGED
@@ -3,13 +3,16 @@ from fastapi.responses import StreamingResponse, JSONResponse
 import outetts
 import io
 import json
+import asyncio
+import os
+
 # Initialize the interface
 interface = outetts.Interface(
     config=outetts.ModelConfig.auto_config(
         model=outetts.Models.VERSION_1_0_SIZE_1B,
         # For llama.cpp backend
-        #backend=outetts.Backend.LLAMACPP,
-        #quantization=outetts.LlamaCppQuantization.FP16
+        # backend=outetts.Backend.LLAMACPP,
+        # quantization=outetts.LlamaCppQuantization.FP16
         # For transformers backend
         backend=outetts.Backend.HF,
     )
@@ -24,32 +27,40 @@ app = FastAPI()
 def greet_json():
     return {"Hello": "World!"}
 
+async def process_chunk(text_chunk: str, websocket: WebSocket):
+    try:
+        output = interface.generate(
+            config=outetts.GenerationConfig(
+                text=text_chunk,
+                generation_type=outetts.GenerationType.CHUNKED,
+                speaker=speaker,
+                sampler_config=outetts.SamplerConfig(
+                    temperature=0.4
+                ),
+            )
+        )
+        # Save audio to buffer
+        audio_buffer = io.BytesIO()
+        output.save(audio_buffer)
+        audio_buffer.seek(0)
+        audio_bytes = audio_buffer.read()
+        # Send audio bytes back
+        await websocket.send_bytes(audio_bytes)
+    except Exception as e:
+        await websocket.send_text(json.dumps({"error": str(e)}))
+
 @app.websocket("/ws/tts")
 async def websocket_tts(websocket: WebSocket):
     await websocket.accept()
+    tasks: set[asyncio.Task] = set()
     try:
         while True:
-            # Receive a text chunk from the client
             data = await websocket.receive_text()
-            # Generate audio for the chunk
-            output = interface.generate(
-                config=outetts.GenerationConfig(
-                    text=data,
-                    generation_type=outetts.GenerationType.CHUNKED,
-                    speaker=speaker,
-                    sampler_config=outetts.SamplerConfig(
-                        temperature=0.4
-                    ),
-                )
-            )
-            # Temporarily save the audio as a file
-            temp_path = "temp.wav"
-            output.save(temp_path)
-            with open(temp_path, "rb") as f:
-                audio_bytes = f.read()
-            import os
-            os.remove(temp_path)
-            # Send the audio data back as bytes
-            await websocket.send_bytes(audio_bytes)
+            # Schedule processing without awaiting
+            task = asyncio.create_task(process_chunk(data, websocket))
+            tasks.add(task)
+            task.add_done_callback(lambda t: tasks.discard(t))
     except WebSocketDisconnect:
-        pass
+        # Cancel all pending tasks
+        for task in tasks:
+            task.cancel()