multimodalart HF Staff commited on
Commit
b0661e2
·
verified ·
1 Parent(s): 99c0af7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -51
app.py CHANGED
@@ -102,22 +102,18 @@ def _generate_video_segment(input_image_path: str, output_image_path: str, promp
102
  )
103
  return result[0]["video"]
104
 
105
- def build_relight_prompt(light_type, light_direction, light_intensity, custom_prompt, user_prompt):
106
  """Build the relighting prompt based on user selections."""
107
 
108
- # Priority 1: User's own prompt (translated to Chinese)
109
- if user_prompt and user_prompt.strip():
110
- translated = translate_to_chinese(user_prompt)
111
  # Add trigger word if not already present
112
  if "ι‡ζ–°η…§ζ˜Ž" not in translated:
113
  return f"ι‡ζ–°η…§ζ˜Ž,{translated}"
114
  return translated
115
 
116
- # Priority 2: Custom prompt field
117
- if custom_prompt and custom_prompt.strip():
118
- return f"ι‡ζ–°η…§ζ˜Ž,{custom_prompt}"
119
-
120
- # Priority 3: Build from controls
121
  prompt_parts = ["ι‡ζ–°η…§ζ˜Ž"]
122
 
123
  # Light type descriptions
@@ -173,8 +169,7 @@ def infer_relight(
173
  light_type,
174
  light_direction,
175
  light_intensity,
176
- custom_prompt,
177
- user_prompt,
178
  seed,
179
  randomize_seed,
180
  true_guidance_scale,
@@ -184,8 +179,8 @@ def infer_relight(
184
  prev_output = None,
185
  progress=gr.Progress(track_tqdm=True)
186
  ):
187
- prompt = build_relight_prompt(light_type, light_direction, light_intensity, custom_prompt, user_prompt)
188
- print(f"Generated Prompt: {prompt}")
189
 
190
  if randomize_seed:
191
  seed = random.randint(0, MAX_SEED)
@@ -206,7 +201,7 @@ def infer_relight(
206
 
207
  result = pipe(
208
  image=pil_images,
209
- prompt=prompt,
210
  height=height if height != 0 else None,
211
  width=width if width != 0 else None,
212
  num_inference_steps=num_inference_steps,
@@ -215,7 +210,7 @@ def infer_relight(
215
  num_images_per_prompt=1,
216
  ).images[0]
217
 
218
- return result, seed, prompt
219
 
220
  def create_video_between_images(input_image, output_image, prompt: str, request: gr.Request) -> str:
221
  """Create a video between the input and output images."""
@@ -250,7 +245,7 @@ css = '''#col-container { max-width: 800px; margin: 0 auto; }
250
  #examples{max-width: 800px; margin: 0 auto; }'''
251
 
252
  def reset_all():
253
- return ["none", "none", "none", "", "", False, True]
254
 
255
  def end_reset():
256
  return False
@@ -283,13 +278,6 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
283
  gr.Markdown("""
284
  Qwen Image Edit 2509 for Image Relighting ✨
285
  Using [dx8152's Qwen-Image-Edit-2509-Relight LoRA](https://huggingface.co/dx8152/Qwen-Image-Edit-2509-Relight) and [linoyts/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/linoyts/Qwen-Image-Edit-Rapid-AIO) for 4-step inference 💨
286
-
287
- **Three ways to use:**
288
- 1. 🌟 **Write your own prompt** in any language (automatically translated to Chinese)
289
- 2. Use the preset lighting controls
290
- 3. Write a custom Chinese prompt with the trigger word "ι‡ζ–°η…§ζ˜Ž"
291
-
292
- Example: `Add dramatic sunset lighting from the left` or `使用窗帘透光（柔和漫射）的光线对图片进行重新照明`
293
  """
294
  )
295
 
@@ -299,16 +287,6 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
299
  prev_output = gr.Image(value=None, visible=False)
300
  is_reset = gr.Checkbox(value=False, visible=False)
301
 
302
- # User's own prompt (highest priority)
303
- with gr.Group():
304
- gr.Markdown("### 🌟 Your Prompt (Any Language)")
305
- user_prompt = gr.Textbox(
306
- label="Describe the lighting you want",
307
- placeholder="Example: 'Add warm sunset lighting from the right' or 'Make it look like it's lit by neon signs' or 'Add dramatic spotlight from above'",
308
- lines=2,
309
- info="Write in any language! It will be automatically translated to Chinese for the model."
310
- )
311
-
312
  with gr.Tab("Lighting Controls"):
313
  light_type = gr.Dropdown(
314
  label="Light Type",
@@ -351,12 +329,11 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
351
  )
352
 
353
  with gr.Tab("Custom Prompt"):
354
- custom_prompt = gr.Textbox(
355
- label="Custom Chinese Relighting Prompt (Optional)",
356
- placeholder="Example: 使用ηͺ—εΈ˜ι€ε…‰οΌˆζŸ”ε’ŒζΌ«ε°„οΌ‰ηš„ε…‰ηΊΏε―Ήε›Ύη‰‡θΏ›θ‘Œι‡ζ–°η…§ζ˜Ž\nLeave empty to use controls or user prompt above",
357
  lines=3
358
  )
359
- gr.Markdown("*Note: This field is for Chinese prompts. The trigger word '重新照明' will be added automatically. If you entered text in 'Your Prompt' above, it takes priority.*")
360
 
361
  with gr.Row():
362
  reset_btn = gr.Button("Reset")
@@ -378,7 +355,7 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
378
  video_output = gr.Video(label="Generated Video", show_download_button=True, autoplay=True)
379
 
380
  inputs = [
381
- image, light_type, light_direction, light_intensity, custom_prompt, user_prompt,
382
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
383
  ]
384
  outputs = [result, seed, prompt_preview]
@@ -387,7 +364,7 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
387
  reset_btn.click(
388
  fn=reset_all,
389
  inputs=None,
390
- outputs=[light_type, light_direction, light_intensity, custom_prompt, user_prompt, is_reset],
391
  queue=False
392
  ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False)
393
 
@@ -419,13 +396,13 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
419
  # Examples - You'll need to add your own example images
420
  gr.Examples(
421
  examples=[
422
- [None, "soft_window", "side", "soft", "", "", 0, True, 1.0, 4, 1024, 1024],
423
- [None, "golden_hour", "front", "medium", "", "", 0, True, 1.0, 4, 1024, 1024],
424
- [None, "dramatic", "side", "strong", "", "", 0, True, 1.0, 4, 1024, 1024],
425
- [None, "neon", "front", "medium", "", "", 0, True, 1.0, 4, 1024, 1024],
426
- [None, "candlelight", "front", "soft", "", "", 0, True, 1.0, 4, 1024, 1024],
427
  ],
428
- inputs=[image, light_type, light_direction, light_intensity, custom_prompt, user_prompt,
429
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width],
430
  outputs=outputs,
431
  fn=infer_relight,
@@ -441,7 +418,7 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
441
  ).then(
442
  fn=reset_all,
443
  inputs=None,
444
- outputs=[light_type, light_direction, light_intensity, custom_prompt, user_prompt, is_reset],
445
  queue=False
446
  ).then(
447
  fn=end_reset,
@@ -462,17 +439,16 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
462
  return result_img, result_seed, result_prompt, gr.update(visible=show_button)
463
 
464
  control_inputs = [
465
- image, light_type, light_direction, light_intensity, custom_prompt, user_prompt,
466
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
467
  ]
468
  control_inputs_with_flag = [is_reset] + control_inputs
469
 
470
  for control in [light_type, light_direction, light_intensity]:
471
- control.change(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
472
 
473
- custom_prompt.change(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
474
- user_prompt.change(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
475
 
476
  run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
477
 
478
- demo.launch()
 
102
  )
103
  return result[0]["video"]
104
 
105
+ def build_relight_prompt(light_type, light_direction, light_intensity, prompt):
106
  """Build the relighting prompt based on user selections."""
107
 
108
+ # Priority 1: User's prompt (translated to Chinese if needed)
109
+ if prompt and prompt.strip():
110
+ translated = translate_to_chinese(prompt)
111
  # Add trigger word if not already present
112
  if "ι‡ζ–°η…§ζ˜Ž" not in translated:
113
  return f"ι‡ζ–°η…§ζ˜Ž,{translated}"
114
  return translated
115
 
116
+ # Priority 2: Build from controls
 
 
 
 
117
  prompt_parts = ["ι‡ζ–°η…§ζ˜Ž"]
118
 
119
  # Light type descriptions
 
169
  light_type,
170
  light_direction,
171
  light_intensity,
172
+ prompt,
 
173
  seed,
174
  randomize_seed,
175
  true_guidance_scale,
 
179
  prev_output = None,
180
  progress=gr.Progress(track_tqdm=True)
181
  ):
182
+ final_prompt = build_relight_prompt(light_type, light_direction, light_intensity, prompt)
183
+ print(f"Generated Prompt: {final_prompt}")
184
 
185
  if randomize_seed:
186
  seed = random.randint(0, MAX_SEED)
 
201
 
202
  result = pipe(
203
  image=pil_images,
204
+ prompt=final_prompt,
205
  height=height if height != 0 else None,
206
  width=width if width != 0 else None,
207
  num_inference_steps=num_inference_steps,
 
210
  num_images_per_prompt=1,
211
  ).images[0]
212
 
213
+ return result, seed, final_prompt
214
 
215
  def create_video_between_images(input_image, output_image, prompt: str, request: gr.Request) -> str:
216
  """Create a video between the input and output images."""
 
245
  #examples{max-width: 800px; margin: 0 auto; }'''
246
 
247
  def reset_all():
248
+ return ["none", "none", "none", "", False, True]
249
 
250
  def end_reset():
251
  return False
 
278
  gr.Markdown("""
279
  Qwen Image Edit 2509 for Image Relighting ✨
280
  Using [dx8152's Qwen-Image-Edit-2509-Relight LoRA](https://huggingface.co/dx8152/Qwen-Image-Edit-2509-Relight) and [linoyts/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/linoyts/Qwen-Image-Edit-Rapid-AIO) for 4-step inference 💨
 
 
 
 
 
 
 
281
  """
282
  )
283
 
 
287
  prev_output = gr.Image(value=None, visible=False)
288
  is_reset = gr.Checkbox(value=False, visible=False)
289
 
 
 
 
 
 
 
 
 
 
 
290
  with gr.Tab("Lighting Controls"):
291
  light_type = gr.Dropdown(
292
  label="Light Type",
 
329
  )
330
 
331
  with gr.Tab("Custom Prompt"):
332
+ prompt = gr.Textbox(
333
+ label="Relighting Prompt",
334
+ placeholder="Example: Add warm sunset lighting from the right",
335
  lines=3
336
  )
 
337
 
338
  with gr.Row():
339
  reset_btn = gr.Button("Reset")
 
355
  video_output = gr.Video(label="Generated Video", show_download_button=True, autoplay=True)
356
 
357
  inputs = [
358
+ image, light_type, light_direction, light_intensity, prompt,
359
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
360
  ]
361
  outputs = [result, seed, prompt_preview]
 
364
  reset_btn.click(
365
  fn=reset_all,
366
  inputs=None,
367
+ outputs=[light_type, light_direction, light_intensity, prompt, is_reset],
368
  queue=False
369
  ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False)
370
 
 
396
  # Examples - You'll need to add your own example images
397
  gr.Examples(
398
  examples=[
399
+ [None, "soft_window", "side", "soft", "", 0, True, 1.0, 4, 1024, 1024],
400
+ [None, "golden_hour", "front", "medium", "", 0, True, 1.0, 4, 1024, 1024],
401
+ [None, "dramatic", "side", "strong", "", 0, True, 1.0, 4, 1024, 1024],
402
+ [None, "neon", "front", "medium", "", 0, True, 1.0, 4, 1024, 1024],
403
+ [None, "candlelight", "front", "soft", "", 0, True, 1.0, 4, 1024, 1024],
404
  ],
405
+ inputs=[image, light_type, light_direction, light_intensity, prompt,
406
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width],
407
  outputs=outputs,
408
  fn=infer_relight,
 
418
  ).then(
419
  fn=reset_all,
420
  inputs=None,
421
+ outputs=[light_type, light_direction, light_intensity, prompt, is_reset],
422
  queue=False
423
  ).then(
424
  fn=end_reset,
 
439
  return result_img, result_seed, result_prompt, gr.update(visible=show_button)
440
 
441
  control_inputs = [
442
+ image, light_type, light_direction, light_intensity, prompt,
443
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
444
  ]
445
  control_inputs_with_flag = [is_reset] + control_inputs
446
 
447
  for control in [light_type, light_direction, light_intensity]:
448
+ control.input(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
449
 
450
+ # prompt.change(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
 
451
 
452
  run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
453
 
454
+ demo.launch()