import os
import uuid
from pathlib import Path

from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from huggingface_hub import InferenceClient

# --- config ---
OUT_DIR = Path("outputs")
OUT_DIR.mkdir(exist_ok=True)

HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise RuntimeError("Missing HF_TOKEN environment variable. Add it in Hugging Face Space settings (Repository secrets).")

# Initialize client (Fal.ai provider)
client = InferenceClient(provider="fal-ai", api_key=HF_TOKEN)

app = FastAPI(title="Fal.ai Text-to-Video API")

# Serve the outputs folder at /download/<filename>
app.mount("/download", StaticFiles(directory=str(OUT_DIR)), name="download")

class PromptIn(BaseModel):
    prompt: str
    model: str = "akhaliq/veo3.1-fast"   # default; override if you want

@app.post("/generate", response_class=JSONResponse)
def generate_video(payload: PromptIn, request: Request):
    """
    Generate a video from `payload.prompt` using Fal.ai-backed model.
    Returns: { success, file_url, filename }
    """
    prompt = payload.prompt.strip()
    model = payload.model.strip()
    if not prompt:
        raise HTTPException(status_code=400, detail="prompt is required")

    # unique filename
    filename = f"{uuid.uuid4().hex}.mp4"
    out_path = OUT_DIR / filename

    try:
        # Call text_to_video (synchronous call returns bytes)
        # Note: This may take some time depending on model & quota.
        print("⏳ text_to_video request:", model, "prompt len:", len(prompt))
        video_bytes = client.text_to_video(prompt=prompt, model=model)
        if not video_bytes:
            raise RuntimeError("Empty response from text_to_video")

        # Save file
        with open(out_path, "wb") as f:
            f.write(video_bytes)

        # Build absolute URL to download
        base = str(request.base_url).rstrip("/")
        file_url = f"{base}/download/{filename}"

        print("βœ… Video saved:", out_path)
        return {"success": True, "filename": filename, "file_url": file_url}
    except Exception as e:
        # Log the full error server-side, but return a concise message to the client.
        print("❌ Error generating video:", repr(e))
        # If the provider attached an HTTP status (e.g. 429 on quota limits), pass it through.
        status = getattr(getattr(e, "response", None), "status_code", None) or 500
        raise HTTPException(status_code=status, detail=str(e))
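
# Example client call (an illustrative sketch, not part of the app): it assumes
# the server is reachable at http://localhost:7860 and that the `requests`
# package is installed; the URL, port, and prompt are placeholders.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/generate",
#       json={"prompt": "a red fox running through fresh snow"},
#       timeout=600,  # generation can take minutes depending on model and quota
#   )
#   resp.raise_for_status()
#   print(resp.json()["file_url"])  # e.g. http://localhost:7860/download/<uuid>.mp4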

@app.get("/")
def root():
    return {"message": "Text-to-Video API running. POST /generate with JSON {\"prompt\":\"...\"}."}