Eueuiaa committed on
Commit d993e45 · verified · 1 Parent(s): 6eb870c

Update app.py

Files changed (1)
  1. app.py +35 -67
app.py CHANGED
@@ -1,4 +1,4 @@
-# app_refactored_with_postprod.py (com Presets de Guiagem e Opções LTX Completas)
+# app.py
 
 import gradio as gr
 import os
@@ -10,13 +10,13 @@ from pathlib import Path
 try:
     from api.ltx_server_refactored import video_generation_service
 except ImportError:
-    print("ERRO FATAL: Não foi possível importar 'video_generation_service' de 'api.ltx_server_refactored'.")
+    print("ERRO FATAL: Não foi possível importar 'video_generation_service'.")
     sys.exit(1)
 
 try:
     from api.seedvr_server import SeedVRServer
 except ImportError:
-    print("AVISO: Não foi possível importar SeedVRServer. A aba de upscaling SeedVR será desativada.")
+    print("AVISO: Não foi possível importar SeedVRServer.")
     SeedVRServer = None
 
 seedvr_inference_server = SeedVRServer() if SeedVRServer else None
@@ -26,19 +26,14 @@ def create_initial_state():
     return {"low_res_video": None, "low_res_latents": None, "used_seed": None}
 
 # --- FUNÇÕES WRAPPER PARA A UI ---
-
 def run_generate_base_video(
-    # Parâmetros de Geração
-    generation_mode, prompt, neg_prompt, start_img, height, width, duration, cfg, seed, randomize_seed,
-    fp_guidance_preset, fp_guidance_scale_list, fp_stg_scale_list, fp_timesteps_list,
+    generation_mode, prompt, neg_prompt, start_img, height, width, duration,
+    seed, randomize_seed,
     fp_num_inference_steps, ship_initial_inference_steps, ship_final_inference_steps,
+    fp_guidance_preset, fp_guidance_scale_list, fp_stg_scale_list, fp_timesteps_list,
     progress=gr.Progress(track_tqdm=True)
 ):
-    """
-    Função wrapper que decide qual pipeline de backend chamar, passando todas as configurações LTX.
-    """
     print(f"UI: Iniciando geração no modo: {generation_mode}")
-
     try:
         initial_image_conditions = []
         if start_img:
@@ -48,36 +43,33 @@ def run_generate_base_video(
 
         used_seed = None if randomize_seed else seed
 
-        # Agrupa todas as configurações LTX em um único dicionário para o backend
         ltx_configs = {
             "guidance_preset": fp_guidance_preset,
             "guidance_scale_list": fp_guidance_scale_list,
             "stg_scale_list": fp_stg_scale_list,
             "timesteps_list": fp_timesteps_list,
+            "fp_num_inference_steps": fp_num_inference_steps,
+            "ship_initial_inference_steps": ship_initial_inference_steps,
+            "ship_final_inference_steps": ship_final_inference_steps,
         }
 
-        # Decide qual função de backend chamar com base no modo
         if generation_mode == "Narrativa (Múltiplos Prompts)":
             video_path, tensor_path, final_seed = video_generation_service.generate_narrative_low(
-                prompt=prompt, negative_prompt=neg_prompt,
-                height=height, width=width, duration=duration,
-                guidance_scale=cfg, seed=used_seed,
-                initial_image_conditions=initial_image_conditions,
-                fp_num_inference_steps=fp_num_inference_steps, ship_initial_inference_steps=ship_initial_inference_steps, ship_final_inference_steps=ship_final_inference_steps,
+                prompt=prompt, negative_prompt=neg_prompt, height=height, width=width, duration=duration,
+                seed=used_seed, initial_image_conditions=initial_image_conditions,
                 ltx_configs_override=ltx_configs,
             )
-        else: # Modo "Simples (Prompt Único)"
+        else:
             video_path, tensor_path, final_seed = video_generation_service.generate_single_low(
-                prompt=prompt, negative_prompt=neg_prompt,
-                height=height, width=width, duration=duration,
-                guidance_scale=cfg, seed=used_seed,
-                initial_image_conditions=initial_image_conditions,
-                fp_num_inference_steps=fp_num_inference_steps, ship_initial_inference_steps=ship_initial_inference_steps, ship_final_inference_steps=ship_final_inference_steps,
+                prompt=prompt, negative_prompt=neg_prompt, height=height, width=width, duration=duration,
+                seed=used_seed, initial_image_conditions=initial_image_conditions,
                 ltx_configs_override=ltx_configs,
             )
 
+        if video_path is None:
+            raise gr.Error("A geração do vídeo falhou no backend. Verifique os logs do console para detalhes.")
+
         new_state = {"low_res_video": video_path, "low_res_latents": tensor_path, "used_seed": final_seed}
-
         return video_path, new_state, gr.update(visible=True)
 
     except Exception as e:
@@ -116,7 +108,6 @@ def run_seedvr_upscaling(state, seed, resolution, batch_size, fps, progress=gr.P
 # --- DEFINIÇÃO DA INTERFACE GRADIO ---
 with gr.Blocks() as demo:
     gr.Markdown("# LTX Video - Geração e Pós-Produção por Etapas")
-
     app_state = gr.State(value=create_initial_state())
 
     with gr.Row():
@@ -131,21 +122,16 @@ with gr.Blocks() as demo:
             neg_prompt_input = gr.Textbox(label="Negative Prompt", value="blurry, low quality, bad anatomy", lines=2)
             start_image = gr.Image(label="Imagem de Início (Opcional)", type="filepath", sources=["upload"])
 
-            with gr.Accordion("Parâmetros Principais", open=False):
-                duration_input = gr.Slider(label="Duração Total (s)", value=1, step=1, minimum=1, maximum=40)
+            with gr.Accordion("Parâmetros Principais", open=True):
+                duration_input = gr.Slider(label="Duração Total (s)", value=8, step=1, minimum=1, maximum=40)
                 with gr.Row():
-                    height_input = gr.Slider(label="Height", value=720, step=32, minimum=256, maximum=1024)
-                    width_input = gr.Slider(label="Width", value=720, step=32, minimum=256, maximum=1024)
+                    height_input = gr.Slider(label="Height", value=512, step=32, minimum=256, maximum=1024)
+                    width_input = gr.Slider(label="Width", value=704, step=32, minimum=256, maximum=1024)
                 with gr.Row():
                     seed_input = gr.Number(label="Seed", value=42, precision=0)
                     randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
 
             with gr.Accordion("Opções Adicionais LTX (Avançado)", open=False):
-                cfg_input = gr.Slider(label="Guidance Scale (CFG)", info="Afeta o refinamento (se usado) e não tem efeito no First Pass dos modelos 'distilled'.", value=0.0, step=1, minimum=0.0, maximum=10.0)
-                fp_num_inference_steps = gr.Slider(label="Passos de Inferência (First Pass)", minimum=10, maximum=100, step=1, value=10)
-                ship_initial_inference_steps = gr.Slider(label="Passos de Inferência (Ship First)", minimum=0, maximum=100, step=1, value=0)
-                ship_final_inference_steps = gr.Slider(label="Passos de Inferência (Ship Last)", minimum=0, maximum=100, step=1, value=0)
-
                 with gr.Tabs():
                     with gr.TabItem("Guiagem (First Pass)"):
                         fp_guidance_preset = gr.Dropdown(
@@ -158,54 +144,36 @@ with gr.Blocks() as demo:
                         fp_guidance_scale_list = gr.Textbox(label="Lista de Guidance Scale", value="[1, 1, 6, 8, 6, 1, 1]")
                        fp_stg_scale_list = gr.Textbox(label="Lista de STG Scale (Movimento)", value="[0, 0, 4, 4, 4, 2, 1]")
                         fp_timesteps_list = gr.Textbox(label="Lista de Guidance Timesteps", value="[1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]")
 
+                    with gr.TabItem("Scheduler"):
+                        fp_num_inference_steps = gr.Slider(label="Passos de Inferência", minimum=10, maximum=100, step=1, value=30)
+                        ship_initial_inference_steps = gr.Slider(label="Pular Passos Iniciais (img2vid)", minimum=0, maximum=100, step=1, value=0)
+                        ship_final_inference_steps = gr.Slider(label="Pular Passos Finais", minimum=0, maximum=100, step=1, value=0)
+
             generate_low_btn = gr.Button("1. Gerar Vídeo Base", variant="primary")
 
         with gr.Column(scale=1):
             gr.Markdown("### Vídeo Base Gerado")
             low_res_video_output = gr.Video(label="O resultado da Etapa 1 aparecerá aqui", interactive=False)
 
-    with gr.Group(visible=False) as post_prod_group:
-        gr.Markdown("## Etapa 2: Pós-Produção")
-
-        with gr.Tabs():
-            with gr.TabItem("🚀 Upscaler Textura (LTX)"):
-                with gr.Row():
-                    with gr.Column(scale=1):
-                        gr.Markdown("Reutiliza o prompt e CFG para refinar a textura.")
-                        ltx_refine_btn = gr.Button("Aplicar Refinamento LTX", variant="primary")
-                    with gr.Column(scale=1):
-                        ltx_refined_video_output = gr.Video(label="Vídeo com Textura Refinada", interactive=False)
-
-            with gr.TabItem("✨ Upscaler SeedVR"):
-                with gr.Row():
-                    with gr.Column(scale=1):
-                        seedvr_seed = gr.Slider(minimum=0, maximum=999999, value=42, step=1, label="Seed")
-                        seedvr_resolution = gr.Slider(minimum=720, maximum=1440, value=1072, step=8, label="Resolução Vertical")
-                        seedvr_batch_size = gr.Slider(minimum=1, maximum=16, value=4, step=1, label="Batch Size por GPU")
-                        seedvr_fps_output = gr.Number(label="FPS de Saída (0 = original)", value=0)
-                        run_seedvr_button = gr.Button("Iniciar Upscaling SeedVR", variant="primary", interactive=(seedvr_inference_server is not None))
-                    with gr.Column(scale=1):
-                        seedvr_video_output = gr.Video(label="Vídeo com Upscale SeedVR", interactive=False)
-                        seedvr_status_box = gr.Textbox(label="Status", value="Aguardando...", lines=3, interactive=False)
-
-    # --- LÓGICA DE EVENTOS ---
-    def update_custom_guidance_visibility(preset_choice):
+    def update_custom_guidance_visibility(preset_choice):
         return gr.update(visible=(preset_choice == "Customizado"))
 
     fp_guidance_preset.change(fn=update_custom_guidance_visibility, inputs=fp_guidance_preset, outputs=custom_guidance_group)
 
     all_ltx_inputs = [
-        fp_guidance_preset, fp_guidance_scale_list, fp_stg_scale_list, fp_timesteps_list
+        fp_num_inference_steps, ship_initial_inference_steps, ship_final_inference_steps,
+        fp_guidance_preset, fp_guidance_scale_list, fp_stg_scale_list, fp_timesteps_list,
     ]
 
     generate_low_btn.click(
         fn=run_generate_base_video,
         inputs=[
-            generation_mode_input, prompt_input, neg_prompt_input, start_image, height_input, width_input,
-            duration_input, cfg_input, seed_input, randomize_seed,
-            fp_num_inference_steps, ship_initial_inference_steps, ship_final_inference_steps,
-            *all_ltx_inputs
+            generation_mode_input, prompt_input, neg_prompt_input, start_image,
+            height_input, width_input, duration_input,
+            seed_input, randomize_seed,
+            *all_ltx_inputs,
+            # Passa o `cfg` por último, pois não está no `all_ltx_inputs`
         ],
        outputs=[low_res_video_output, app_state, post_prod_group]
     )
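
Note on the contract this commit relies on: api/ltx_server_refactored.py is not part of this change, so generate_narrative_low / generate_single_low are assumed to read the three scheduler values (fp_num_inference_steps, ship_initial_inference_steps, ship_final_inference_steps) from ltx_configs_override now that they are no longer passed as keyword arguments. The sketch below is a hypothetical illustration of how the backend might merge and parse that dict; the helper name, the defaults, and the ast.literal_eval parsing of the Textbox strings are assumptions, not code from this repository.

# Hypothetical sketch (not part of this commit): resolving the ltx_configs_override
# dict that run_generate_base_video sends. Key names match the dict built in app.py.
import ast

DEFAULT_LTX_CONFIGS = {
    "guidance_preset": None,  # preset label chosen in the UI dropdown
    "guidance_scale_list": "[1, 1, 6, 8, 6, 1, 1]",
    "stg_scale_list": "[0, 0, 4, 4, 4, 2, 1]",
    "timesteps_list": "[1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]",
    "fp_num_inference_steps": 30,
    "ship_initial_inference_steps": 0,
    "ship_final_inference_steps": 0,
}

def resolve_ltx_configs(ltx_configs_override=None):
    """Merge UI overrides into the defaults and parse the list-valued Textbox strings."""
    cfg = {**DEFAULT_LTX_CONFIGS, **(ltx_configs_override or {})}
    for key in ("guidance_scale_list", "stg_scale_list", "timesteps_list"):
        if isinstance(cfg[key], str):
            # gr.Textbox delivers strings such as "[1, 1, 6, 8, 6, 1, 1]"
            cfg[key] = ast.literal_eval(cfg[key])
    return cfg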