Khushi Dahiya
committed on
Commit
·
3cfeaa8
1
Parent(s):
aa792b5
adding audio component
Browse files- demos/melodyflow_api.py +119 -3
demos/melodyflow_api.py
CHANGED
|
@@ -60,6 +60,31 @@ MAX_QUEUE_SIZE = 100
|
|
| 60 |
MAX_CONCURRENT_BATCHES = 2 # Number of concurrent batch processors
|
| 61 |
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
@dataclass
|
| 64 |
class GenerationRequest:
|
| 65 |
"""Represents a single generation request"""
|
|
@@ -405,6 +430,97 @@ def predict_concurrent(model: str, text: str, solver: str = "euler",
|
|
| 405 |
raise gr.Error(f"Generation failed: {str(e)}")
|
| 406 |
|
| 407 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
def create_optimized_interface():
|
| 409 |
"""Create Gradio interface optimized for concurrent usage"""
|
| 410 |
|
|
@@ -436,10 +552,10 @@ def create_optimized_interface():
|
|
| 436 |
generate_btn = gr.Button("Generate", variant="primary")
|
| 437 |
|
| 438 |
with gr.Column():
|
| 439 |
-
output = gr.
|
| 440 |
|
| 441 |
generate_btn.click(
|
| 442 |
-
fn=
|
| 443 |
inputs=[model, text, solver, steps, gr.State(0.0),
|
| 444 |
gr.State(False), gr.State(0.0), duration, melody],
|
| 445 |
outputs=output,
|
|
@@ -447,7 +563,7 @@ def create_optimized_interface():
|
|
| 447 |
)
|
| 448 |
|
| 449 |
gr.Examples(
|
| 450 |
-
fn=
|
| 451 |
examples=[
|
| 452 |
[f"{MODEL_PREFIX}melodyflow-t24-30secs",
|
| 453 |
"80s electronic track with melodic synthesizers",
|
|
|
|
| 60 |
MAX_CONCURRENT_BATCHES = 2 # Number of concurrent batch processors
|
| 61 |
|
| 62 |
|
| 63 |
+
class FileCleaner:
    """Best-effort janitor for temporary audio files.

    Files registered via :meth:`add` are deleted once they are older than
    ``file_lifetime`` seconds. Cleanup is piggy-backed onto every ``add``
    call, so no background thread is required.
    """

    def __init__(self, file_lifetime: float = 3600):
        # Seconds a file is kept before it becomes eligible for deletion.
        self.file_lifetime = file_lifetime
        # Oldest-first list of (registration_time, Path) pairs.
        self.files = []

    def add(self, path: tp.Union[str, Path]):
        """Register *path* for future deletion, purging expired files first."""
        self._cleanup()
        self.files.append((time.time(), Path(path)))

    def _cleanup(self):
        """Delete and drop every leading entry older than ``file_lifetime``.

        Entries are stored oldest-first, so we can stop at the first
        non-expired one.
        """
        now = time.time()
        while self.files and now - self.files[0][0] > self.file_lifetime:
            _, path = self.files.pop(0)
            try:
                if path.exists():
                    path.unlink()
            except OSError:
                # Cleanup is best-effort: a file already removed by someone
                # else (or a permission problem) must not break the caller's
                # add() call.
                pass
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# Module-level cleaner shared by all requests; the FileCleaner default keeps
# each generated temporary .wav around for 3600 s before removing it.
file_cleaner = FileCleaner()
|
| 86 |
+
|
| 87 |
+
|
| 88 |
@dataclass
|
| 89 |
class GenerationRequest:
|
| 90 |
"""Represents a single generation request"""
|
|
|
|
| 430 |
raise gr.Error(f"Generation failed: {str(e)}")
|
| 431 |
|
| 432 |
|
| 433 |
+
def predict_concurrent_ui(model: str, text: str, solver: str = "euler",
                          steps: int = 50, target_flowstep: float = 0.0,
                          regularize: bool = False, regularization_strength: float = 0.0,
                          duration: float = 10.0, melody: tp.Optional[str] = None) -> str:
    """
    UI-optimized predict function that returns audio file path for Gradio Audio component

    Mirrors predict_concurrent() but decodes the base64 payload returned by
    the batch processor into a temporary .wav file, which is what gr.Audio
    expects.

    Args:
        model: model identifier forwarded to the batch processor.
        text: prompt describing the audio to generate.
        solver: ODE solver name ("euler" or "midpoint").
        steps: number of solver steps (reduced automatically in melody mode).
        target_flowstep: flow step forwarded to the batch processor.
        regularize: regularization flag forwarded to the batch processor.
        regularization_strength: strength forwarded to the batch processor.
        duration: requested audio duration in seconds.
        melody: optional reference-audio path; enables editing mode.

    Returns:
        Path to a temporary .wav file (registered with file_cleaner for
        eventual deletion).

    Raises:
        gr.Error: on timeout, missing audio payload, or any backend failure.
    """
    import base64
    from tempfile import NamedTemporaryFile
    # Before Python 3.11 futures raise their own TimeoutError, which is NOT
    # the builtin TimeoutError — catch both below.
    from concurrent.futures import TimeoutError as FuturesTimeoutError

    # Melody editing needs fewer integration steps; same heuristic as the
    # API entry point so both paths behave identically.
    if melody is not None:
        steps = steps // 2 if solver == "midpoint" else steps // 5

    # Submit request to batch processor
    future = batch_processor.submit_request(
        text=text,
        melody=melody,
        solver=solver,
        steps=steps,
        target_flowstep=target_flowstep,
        regularize=regularize,
        regularization_strength=regularization_strength,
        duration=duration,
        model=model
    )

    # Wait for result with timeout
    try:
        result = future.result(timeout=120)  # 2 minute timeout

        if not (isinstance(result, dict) and "audio" in result):
            raise gr.Error("No audio data received")

        print("✅ Received audio result, converting to file...")

        # Decode base64 and save to a temporary file for the Audio component.
        audio_data = base64.b64decode(result["audio"])
        with NamedTemporaryFile(mode="wb", suffix=".wav", delete=False) as temp_file:
            temp_file.write(audio_data)
            temp_file_path = temp_file.name

        file_cleaner.add(temp_file_path)  # Add to cleanup queue
        print(f"🎵 Audio saved to: {temp_file_path}")
        return temp_file_path
    except (FuturesTimeoutError, TimeoutError):
        print("⏰ Request timeout")
        raise gr.Error("Request timeout - server is overloaded")
    except gr.Error:
        # Don't re-wrap errors we raised deliberately (e.g. missing payload)
        # as a generic "Generation failed" message.
        raise
    except Exception as e:
        print(f"💥 Exception: {str(e)}")
        raise gr.Error(f"Generation failed: {str(e)}")
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def predict_concurrent(model: str, text: str, solver: str = "euler",
                       steps: int = 50, target_flowstep: float = 0.0,
                       regularize: bool = False, regularization_strength: float = 0.0,
                       duration: float = 10.0, melody: tp.Optional[str] = None) -> dict:
    """
    API predict function that returns base64 audio data (for API endpoints)

    Args:
        model: model identifier forwarded to the batch processor.
        text: prompt describing the audio to generate.
        solver: ODE solver name ("euler" or "midpoint").
        steps: number of solver steps (reduced automatically in melody mode).
        target_flowstep: flow step forwarded to the batch processor.
        regularize: regularization flag forwarded to the batch processor.
        regularization_strength: strength forwarded to the batch processor.
        duration: requested audio duration in seconds.
        melody: optional reference-audio path; enables editing mode.

    Returns:
        The raw result dict produced by the batch processor (base64 audio
        under the "audio" key — see predict_concurrent_ui, which decodes it).

    Raises:
        gr.Error: on timeout or any backend failure.
    """
    # Before Python 3.11 futures raise their own TimeoutError, which is NOT
    # the builtin TimeoutError — catch both below.
    from concurrent.futures import TimeoutError as FuturesTimeoutError

    # Melody editing needs fewer integration steps.
    if melody is not None:
        steps = steps // 2 if solver == "midpoint" else steps // 5

    # Submit request to batch processor
    future = batch_processor.submit_request(
        text=text,
        melody=melody,
        solver=solver,
        steps=steps,
        target_flowstep=target_flowstep,
        regularize=regularize,
        regularization_strength=regularization_strength,
        duration=duration,
        model=model
    )

    # Wait for result with timeout
    try:
        return future.result(timeout=120)  # 2 minute timeout
    except (FuturesTimeoutError, TimeoutError):
        raise gr.Error("Request timeout - server is overloaded")
    except Exception as e:
        raise gr.Error(f"Generation failed: {str(e)}")
|
| 522 |
+
|
| 523 |
+
|
| 524 |
def create_optimized_interface():
|
| 525 |
"""Create Gradio interface optimized for concurrent usage"""
|
| 526 |
|
|
|
|
| 552 |
generate_btn = gr.Button("Generate", variant="primary")
|
| 553 |
|
| 554 |
with gr.Column():
|
| 555 |
+
output = gr.Audio(label="Generated Audio")
|
| 556 |
|
| 557 |
generate_btn.click(
|
| 558 |
+
fn=predict_concurrent_ui,
|
| 559 |
inputs=[model, text, solver, steps, gr.State(0.0),
|
| 560 |
gr.State(False), gr.State(0.0), duration, melody],
|
| 561 |
outputs=output,
|
|
|
|
| 563 |
)
|
| 564 |
|
| 565 |
gr.Examples(
|
| 566 |
+
fn=predict_concurrent_ui,
|
| 567 |
examples=[
|
| 568 |
[f"{MODEL_PREFIX}melodyflow-t24-30secs",
|
| 569 |
"80s electronic track with melodic synthesizers",
|